# mirror of https://github.com/serp-ai/bark-with-voice-clone.git (synced 2025-12-15)
import contextlib
import hashlib
import os
import re
import requests

from encodec import EncodecModel
import funcy
import logging
import numpy as np
from scipy.special import softmax
import torch
import torch.nn.functional as F
import tqdm
from transformers import BertTokenizer

from .model import GPTConfig, GPT
from .model_fine import FineGPT, FineGPTConfig

if (
    torch.cuda.is_available() and
    hasattr(torch.cuda, "amp") and
    hasattr(torch.cuda.amp, "autocast") and
    torch.cuda.is_bf16_supported()
):
    autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16)
else:

    @contextlib.contextmanager
    def autocast():
        yield


# hold models in global scope to lazy load
global models
models = {}


CONTEXT_WINDOW_SIZE = 1024

SEMANTIC_RATE_HZ = 49.9
SEMANTIC_VOCAB_SIZE = 10_000

CODEBOOK_SIZE = 1024
N_COARSE_CODEBOOKS = 2
N_FINE_CODEBOOKS = 8
COARSE_RATE_HZ = 75

SAMPLE_RATE = 24_000
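# Rate arithmetic implied by the constants above (a rough sketch, not normative):
# one second of audio corresponds to ~49.9 semantic tokens, while the coarse stage
# emits COARSE_RATE_HZ * N_COARSE_CODEBOOKS = 75 * 2 = 150 coarse tokens per second,
# i.e. about 150 / 49.9 ~= 3 coarse tokens per semantic token (this is the
# `semantic_to_coarse_ratio` computed in generate_coarse below).
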
logger = logging.getLogger(__name__)

CUR_PATH = os.path.dirname(os.path.abspath(__file__))


default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


REMOTE_BASE_URL = "https://dl.suno-models.io/bark/models/v0/"
REMOTE_MODEL_PATHS = {
    "text": {
        "path": os.environ.get("SUNO_TEXT_MODEL_PATH", os.path.join(REMOTE_BASE_URL, "text_2.pt")),
        "checksum": "54afa89d65e318d4f5f80e8e8799026a",
    },
    "coarse": {
        "path": os.environ.get(
            "SUNO_COARSE_MODEL_PATH", os.path.join(REMOTE_BASE_URL, "coarse_2.pt")
        ),
        "checksum": "8a98094e5e3a255a5c9c0ab7efe8fd28",
    },
    "fine": {
        "path": os.environ.get("SUNO_FINE_MODEL_PATH", os.path.join(REMOTE_BASE_URL, "fine_2.pt")),
        "checksum": "59d184ed44e3650774a2f0503a48a97b",
    },
}

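# Checkpoints are cached under CACHE_DIR with filenames derived from the md5 of the
# remote path (see _get_ckpt_path); _load_model below removes and re-downloads a
# cached file whenever its md5 no longer matches the expected checksum above.
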
if not hasattr(torch.nn.functional, "scaled_dot_product_attention"):
    logger.warning(
        "torch version does not support flash attention. You will get significantly faster"
        " inference speed by upgrading torch to the newest version / nightly."
    )


def _string_md5(s):
    m = hashlib.md5()
    m.update(s.encode("utf-8"))
    return m.hexdigest()


def _md5(fname):
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()


def _get_ckpt_path(model_type):
    model_name = _string_md5(REMOTE_MODEL_PATHS[model_type]["path"])
    return os.path.join(CACHE_DIR, f"{model_name}.pt")


S3_BUCKET_PATH_RE = r"s3\:\/\/(.+?)\/"


def _parse_s3_filepath(s3_filepath):
    bucket_name = re.search(S3_BUCKET_PATH_RE, s3_filepath).group(1)
    rel_s3_filepath = re.sub(S3_BUCKET_PATH_RE, "", s3_filepath)
    return bucket_name, rel_s3_filepath


def _download(from_s3_path, to_local_path):
    os.makedirs(CACHE_DIR, exist_ok=True)
    response = requests.get(from_s3_path, stream=True)
    total_size_in_bytes = int(response.headers.get("content-length", 0))
    block_size = 1024  # 1 Kibibyte
    progress_bar = tqdm.tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
    with open(to_local_path, "wb") as file:
        for data in response.iter_content(block_size):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
    if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
        raise ValueError(
            f"download of {from_s3_path} was incomplete: "
            f"got {progress_bar.n} of {total_size_in_bytes} bytes"
        )


class InferenceContext:
    def __init__(self, benchmark=False):
        # we can't expect inputs to be the same length, so disable benchmarking by default
        self._chosen_cudnn_benchmark = benchmark
        self._cudnn_benchmark = None

    def __enter__(self):
        self._cudnn_benchmark = torch.backends.cudnn.benchmark
        torch.backends.cudnn.benchmark = self._chosen_cudnn_benchmark

    def __exit__(self, exc_type, exc_value, exc_traceback):
        torch.backends.cudnn.benchmark = self._cudnn_benchmark


if torch.cuda.is_available():
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True


@contextlib.contextmanager
def _inference_mode():
    with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast():
        yield

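
# _inference_mode bundles the settings used by every generation call below:
# cudnn benchmarking disabled (inputs vary in length), torch.inference_mode /
# no_grad, and bf16 autocast when the GPU supports it (see the shim at the top).
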
def _clear_cuda_cache():
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()


def clean_models(model_key=None):
    global models
    # take a list copy so entries can be deleted while iterating
    model_keys = [model_key] if model_key is not None else list(models.keys())
    for k in model_keys:
        if k in models:
            del models[k]
    _clear_cuda_cache()


def _load_model(ckpt_path, device, model_type="text"):
    if "cuda" not in device:
        logger.warning("No GPU being used. Careful, inference might be extremely slow!")
    if model_type == "text":
        ConfigClass = GPTConfig
        ModelClass = GPT
    elif model_type == "coarse":
        ConfigClass = GPTConfig
        ModelClass = GPT
    elif model_type == "fine":
        ConfigClass = FineGPTConfig
        ModelClass = FineGPT
    else:
        raise NotImplementedError()
    if (
        os.path.exists(ckpt_path) and
        _md5(ckpt_path) != REMOTE_MODEL_PATHS[model_type]["checksum"]
    ):
        logger.warning(f"found outdated {model_type} model, removing...")
        os.remove(ckpt_path)
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading...")
        _download(REMOTE_MODEL_PATHS[model_type]["path"], ckpt_path)
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    gptconf = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(gptconf)
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")])
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")])
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.get_num_params()
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    _clear_cuda_cache()
    if model_type == "text":
        tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
        return {
            "model": model,
            "tokenizer": tokenizer,
        }
    return model


def _load_codec_model(device):
    model = EncodecModel.encodec_model_24khz()
    model.set_target_bandwidth(6.0)
    model.eval()
    model.to(device)
    _clear_cuda_cache()
    return model


def load_model(ckpt_path=None, use_gpu=True, force_reload=False, model_type="text"):
    _load_model_f = funcy.partial(_load_model, model_type=model_type)
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    global models
    if torch.cuda.device_count() == 0 or not use_gpu:
        device = "cpu"
    else:
        device = "cuda"
    model_key = str(device) + f"__{model_type}"
    if model_key not in models or force_reload:
        if ckpt_path is None:
            ckpt_path = _get_ckpt_path(model_type)
        clean_models(model_key=model_key)
        model = _load_model_f(ckpt_path, device)
        models[model_key] = model
    return models[model_key]


def load_codec_model(use_gpu=True, force_reload=False):
    global models
    if torch.cuda.device_count() == 0 or not use_gpu:
        device = "cpu"
    else:
        device = "cuda"
    model_key = str(device) + "__codec"
    if model_key not in models or force_reload:
        clean_models(model_key=model_key)
        model = _load_codec_model(device)
        models[model_key] = model
    return models[model_key]


def preload_models(text_ckpt_path=None, coarse_ckpt_path=None, fine_ckpt_path=None, use_gpu=True):
    _ = load_model(
        ckpt_path=text_ckpt_path, model_type="text", use_gpu=use_gpu, force_reload=True
    )
    _ = load_model(
        ckpt_path=coarse_ckpt_path, model_type="coarse", use_gpu=use_gpu, force_reload=True
    )
    _ = load_model(
        ckpt_path=fine_ckpt_path, model_type="fine", use_gpu=use_gpu, force_reload=True
    )
    _ = load_codec_model(use_gpu=use_gpu, force_reload=True)

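
# Note: preload_models eagerly downloads/loads all four models (text, coarse, fine,
# codec) up front; otherwise load_model / load_codec_model lazily load and cache
# them in the module-level `models` dict on first use, keyed by "<device>__<model_type>".
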
####
# Generation Functionality
####


def _tokenize(tokenizer, text):
    return tokenizer.encode(text, add_special_tokens=False)


def _detokenize(tokenizer, enc_text):
    return tokenizer.decode(enc_text)


def _normalize_whitespace(text):
    return re.sub(r"\s+", " ", text).strip()


TEXT_ENCODING_OFFSET = 10_048
SEMANTIC_PAD_TOKEN = 10_000
TEXT_PAD_TOKEN = 129_595
SEMANTIC_INFER_TOKEN = 129_599

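# Prompt layout assembled by generate_text_semantic below: 256 BERT token ids
# shifted by TEXT_ENCODING_OFFSET (padded with TEXT_PAD_TOKEN or truncated), then
# 256 semantic history tokens (padded with SEMANTIC_PAD_TOKEN), then
# SEMANTIC_INFER_TOKEN -- a fixed 256 + 256 + 1 = 513-token context.
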
def generate_text_semantic(
    text,
    history_prompt=None,
    temp=0.7,
    top_k=None,
    top_p=None,
    use_gpu=True,
    silent=False,
    min_eos_p=0.2,
    max_gen_duration_s=None,
    allow_early_stop=True,
    model=None,
):
    """Generate semantic tokens from text."""
    assert isinstance(text, str)
    text = _normalize_whitespace(text)
    assert len(text.strip()) > 0
    if history_prompt is not None:
        semantic_history = np.load(
            os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt}.npz")
        )["semantic_prompt"]
        assert (
            isinstance(semantic_history, np.ndarray)
            and len(semantic_history.shape) == 1
            and len(semantic_history) > 0
            and semantic_history.min() >= 0
            and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
        )
    else:
        semantic_history = None
    model_container = load_model(use_gpu=use_gpu, model_type="text")
    if model is None:
        model = model_container["model"]
    tokenizer = model_container["tokenizer"]
    encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET
    device = "cuda" if use_gpu and torch.cuda.device_count() > 0 else "cpu"
    if len(encoded_text) > 256:
        p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1)
        logger.warning(f"warning, text too long, lopping off last {p}%")
        encoded_text = encoded_text[:256]
    encoded_text = np.pad(
        encoded_text,
        (0, 256 - len(encoded_text)),
        constant_values=TEXT_PAD_TOKEN,
        mode="constant",
    )
    if semantic_history is not None:
        semantic_history = semantic_history.astype(np.int64)
        # lop off if history is too long, pad if needed
        semantic_history = semantic_history[-256:]
        semantic_history = np.pad(
            semantic_history,
            (0, 256 - len(semantic_history)),
            constant_values=SEMANTIC_PAD_TOKEN,
            mode="constant",
        )
    else:
        semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256)
    x = torch.from_numpy(
        np.hstack([encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN])]).astype(np.int64)
    )[None]
    assert x.shape[1] == 256 + 256 + 1
    with _inference_mode():
        x = x.to(device)
        n_tot_steps = 768
        # custom tqdm updates since we don't know when eos will occur
        pbar = tqdm.tqdm(disable=silent, total=100)
        pbar_state = 0
        tot_generated_duration_s = 0
        for n in range(n_tot_steps):
            logits = model(x, merge_context=True)
            relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE]
            if allow_early_stop:
                relevant_logits = torch.hstack(
                    (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]])  # eos
                )
            if top_p is not None:
                # faster to convert to numpy
                logits_device = relevant_logits.device
                logits_dtype = relevant_logits.type()
                relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
                sorted_indices = np.argsort(relevant_logits)[::-1]
                sorted_logits = relevant_logits[sorted_indices]
                cumulative_probs = np.cumsum(softmax(sorted_logits))
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
                sorted_indices_to_remove[0] = False
                relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
                relevant_logits = torch.from_numpy(relevant_logits)
                relevant_logits = relevant_logits.to(logits_device).type(logits_dtype)
            if top_k is not None:
                v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
                relevant_logits[relevant_logits < v[-1]] = -float("Inf")
            probs = F.softmax(relevant_logits / temp, dim=-1)
            item_next = torch.multinomial(probs, num_samples=1)
            if allow_early_stop and (
                item_next == SEMANTIC_VOCAB_SIZE
                or (min_eos_p is not None and probs[-1] >= min_eos_p)
            ):
                # eos found, so break
                pbar.update(100 - pbar_state)
                break
            x = torch.cat((x, item_next[None]), dim=1)
            tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ
            if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s:
                pbar.update(100 - pbar_state)
                break
            if n == n_tot_steps - 1:
                pbar.update(100 - pbar_state)
                break
            del logits, relevant_logits, probs, item_next
            req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))])
            if req_pbar_state > pbar_state:
                pbar.update(req_pbar_state - pbar_state)
                pbar_state = req_pbar_state
        pbar.close()
        out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :]
    assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE)
    _clear_cuda_cache()
    return out

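
# The returned array holds semantic tokens at SEMANTIC_RATE_HZ, so the implied
# audio duration is len(out) / SEMANTIC_RATE_HZ seconds; with n_tot_steps = 768,
# a single call tops out around 768 / 49.9 ~= 15 seconds of audio.
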
def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE):
    assert len(arr.shape) == 2
    arr = arr.copy()
    if offset_size is not None:
        for n in range(1, arr.shape[0]):
            arr[n, :] += offset_size * n
    flat_arr = arr.ravel("F")
    return flat_arr

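
# Worked example (sketch): with offset_size = 1024, a 2-codebook array
#   [[a0, a1],
#    [b0, b1]]
# flattens to [a0, b0 + 1024, a1, b1 + 1024] -- column-major interleaving with a
# per-codebook offset, exactly the layout that generate_coarse undoes on its output.
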
COARSE_SEMANTIC_PAD_TOKEN = 12_048
COARSE_INFER_TOKEN = 12_050


def generate_coarse(
    x_semantic,
    history_prompt=None,
    temp=0.7,
    top_k=None,
    top_p=None,
    use_gpu=True,
    silent=False,
    max_coarse_history=630,  # min 60 (faster), max 630 (more context)
    sliding_window_len=60,
    model=None,
):
    """Generate coarse audio codes from semantic tokens."""
    assert (
        isinstance(x_semantic, np.ndarray)
        and len(x_semantic.shape) == 1
        and len(x_semantic) > 0
        and x_semantic.min() >= 0
        and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1
    )
    assert 60 <= max_coarse_history <= 630
    assert max_coarse_history + sliding_window_len <= 1024 - 256
    semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS
    max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio))
    if history_prompt is not None:
        x_history = np.load(
            os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt}.npz")
        )
        x_semantic_history = x_history["semantic_prompt"]
        x_coarse_history = x_history["coarse_prompt"]
        assert (
            isinstance(x_semantic_history, np.ndarray)
            and len(x_semantic_history.shape) == 1
            and len(x_semantic_history) > 0
            and x_semantic_history.min() >= 0
            and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1
            and isinstance(x_coarse_history, np.ndarray)
            and len(x_coarse_history.shape) == 2
            and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS
            and x_coarse_history.shape[-1] >= 0
            and x_coarse_history.min() >= 0
            and x_coarse_history.max() <= CODEBOOK_SIZE - 1
            and (
                round(x_coarse_history.shape[-1] / len(x_semantic_history), 1)
                == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1)
            )
        )
        x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE
        # trim histories correctly
        n_semantic_hist_provided = np.min(
            [
                max_semantic_history,
                len(x_semantic_history) - len(x_semantic_history) % 2,
                int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)),
            ]
        )
        n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio))
        x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32)
        x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32)
        # TODO: bit of a hack for time alignment (sounds better)
        x_coarse_history = x_coarse_history[:-2]
    else:
        x_semantic_history = np.array([], dtype=np.int32)
        x_coarse_history = np.array([], dtype=np.int32)
    if model is None:
        model = load_model(use_gpu=use_gpu, model_type="coarse")
    device = "cuda" if use_gpu and torch.cuda.device_count() > 0 else "cpu"
    # start loop
    n_steps = int(
        round(
            np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS)
            * N_COARSE_CODEBOOKS
        )
    )
    assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0
    x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32)
    x_coarse = x_coarse_history.astype(np.int32)
    base_semantic_idx = len(x_semantic_history)
    with _inference_mode():
        x_semantic_in = torch.from_numpy(x_semantic)[None].to(device)
        x_coarse_in = torch.from_numpy(x_coarse)[None].to(device)
        n_window_steps = int(np.ceil(n_steps / sliding_window_len))
        n_step = 0
        for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent):
            semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio))
            # pad from right side
            x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :]
            x_in = x_in[:, :256]
            x_in = F.pad(
                x_in,
                (0, 256 - x_in.shape[-1]),
                "constant",
                COARSE_SEMANTIC_PAD_TOKEN,
            )
            x_in = torch.hstack(
                [
                    x_in,
                    torch.tensor([COARSE_INFER_TOKEN])[None].to(device),
                    x_coarse_in[:, -max_coarse_history:],
                ]
            )
            for _ in range(sliding_window_len):
                if n_step >= n_steps:
                    continue
                is_major_step = n_step % N_COARSE_CODEBOOKS == 0
                logits = model(x_in)
                logit_start_idx = (
                    SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE
                )
                logit_end_idx = (
                    SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE
                )
                relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx]
                if top_p is not None:
                    # faster to convert to numpy
                    logits_device = relevant_logits.device
                    logits_dtype = relevant_logits.type()
                    relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy()
                    sorted_indices = np.argsort(relevant_logits)[::-1]
                    sorted_logits = relevant_logits[sorted_indices]
                    cumulative_probs = np.cumsum(softmax(sorted_logits))
                    sorted_indices_to_remove = cumulative_probs > top_p
                    sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy()
                    sorted_indices_to_remove[0] = False
                    relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf
                    relevant_logits = torch.from_numpy(relevant_logits)
                    relevant_logits = relevant_logits.to(logits_device).type(logits_dtype)
                if top_k is not None:
                    v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1)))
                    relevant_logits[relevant_logits < v[-1]] = -float("Inf")
                probs = F.softmax(relevant_logits / temp, dim=-1)
                item_next = torch.multinomial(probs, num_samples=1)
                item_next += logit_start_idx
                x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1)
                x_in = torch.cat((x_in, item_next[None]), dim=1)
                del logits, relevant_logits, probs, item_next
                n_step += 1
            del x_in
        del x_semantic_in
    gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :]
    del x_coarse_in
    assert len(gen_coarse_arr) == n_steps
    gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE
    for n in range(1, N_COARSE_CODEBOOKS):
        gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE
    _clear_cuda_cache()
    return gen_coarse_audio_arr

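
# generate_coarse returns an (N_COARSE_CODEBOOKS, T) array of EnCodec codebook
# indices in [0, CODEBOOK_SIZE): the flattened, offset token stream is reshaped
# into per-codebook rows and the per-codebook offsets are subtracted just above.
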
def generate_fine(
    x_coarse_gen,
    history_prompt=None,
    temp=0.5,
    use_gpu=True,
    silent=True,
    model=None,
):
    """Generate full audio codes from coarse audio codes."""
    assert (
        isinstance(x_coarse_gen, np.ndarray)
        and len(x_coarse_gen.shape) == 2
        and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1
        and x_coarse_gen.shape[1] > 0
        and x_coarse_gen.min() >= 0
        and x_coarse_gen.max() <= CODEBOOK_SIZE - 1
    )
    if history_prompt is not None:
        x_fine_history = np.load(
            os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt}.npz")
        )["fine_prompt"]
        assert (
            isinstance(x_fine_history, np.ndarray)
            and len(x_fine_history.shape) == 2
            and x_fine_history.shape[0] == N_FINE_CODEBOOKS
            and x_fine_history.shape[1] >= 0
            and x_fine_history.min() >= 0
            and x_fine_history.max() <= CODEBOOK_SIZE - 1
        )
    else:
        x_fine_history = None
    n_coarse = x_coarse_gen.shape[0]
    if model is None:
        model = load_model(use_gpu=use_gpu, model_type="fine")
    device = "cuda" if use_gpu and torch.cuda.device_count() > 0 else "cpu"
    # make input arr
    in_arr = np.vstack(
        [
            x_coarse_gen,
            np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1]))
            + CODEBOOK_SIZE,  # padding
        ]
    ).astype(np.int32)
    # prepend history if available (max 512)
    if x_fine_history is not None:
        x_fine_history = x_fine_history.astype(np.int32)
        in_arr = np.hstack(
            [
                x_fine_history[:, -512:].astype(np.int32),
                in_arr,
            ]
        )
        n_history = x_fine_history[:, -512:].shape[1]
    else:
        n_history = 0
    n_remove_from_end = 0
    # need to pad if too short (since non-causal model)
    if in_arr.shape[1] < 1024:
        n_remove_from_end = 1024 - in_arr.shape[1]
        in_arr = np.hstack(
            [
                in_arr,
                np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE,
            ]
        )
    # we can be lazy about fractional loop and just keep overwriting codebooks
    n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1
    with _inference_mode():
        in_arr = torch.tensor(in_arr.T).to(device)
        for n in tqdm.tqdm(range(n_loops), disable=silent):
            start_idx = np.min([n * 512, in_arr.shape[0] - 1024])
            start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512])
            rel_start_fill_idx = start_fill_idx - start_idx
            in_buffer = in_arr[start_idx : start_idx + 1024, :][None]
            for nn in range(n_coarse, N_FINE_CODEBOOKS):
                logits = model(nn, in_buffer)
                if temp is None:
                    relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE]
                    codebook_preds = torch.argmax(relevant_logits, -1)
                else:
                    relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp
                    probs = F.softmax(relevant_logits, dim=-1)
                    codebook_preds = torch.hstack(
                        [
                            torch.multinomial(probs[nnn], num_samples=1)
                            for nnn in range(rel_start_fill_idx, 1024)
                        ]
                    )
                in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds
                del logits, codebook_preds
            # transfer predictions back into in_arr (numpy conversion happens after the loop)
            for nn in range(n_coarse, N_FINE_CODEBOOKS):
                in_arr[
                    start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn
                ] = in_buffer[0, rel_start_fill_idx:, nn]
            del in_buffer
    gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T
    del in_arr
    gen_fine_arr = gen_fine_arr[:, n_history:]
    if n_remove_from_end > 0:
        gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end]
    assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1]
    _clear_cuda_cache()
    return gen_fine_arr

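
# The fine model is non-causal over a 1024-token window, so generate_fine slides
# that window in 512-step hops, rewriting codebooks n_coarse..N_FINE_CODEBOOKS-1
# in place, then trims the history prefix and any right-padding added above.
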
def codec_decode(fine_tokens, model=None, use_gpu=True):
    """Turn quantized audio codes into audio array using encodec."""
    if model is None:
        model = load_codec_model(use_gpu=use_gpu)
    device = "cuda" if use_gpu and torch.cuda.device_count() > 0 else "cpu"
    arr = torch.from_numpy(fine_tokens)[None]
    arr = arr.to(device)
    arr = arr.transpose(0, 1)
    emb = model.quantizer.decode(arr)
    out = model.decoder(emb)
    audio_arr = out.detach().cpu().numpy().squeeze()
    del arr, emb, out
    return audio_arr
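

# End-to-end usage sketch (assumptions: this module is importable as `bark.generation`
# and a 24 kHz-capable writer such as scipy.io.wavfile is available; the variable
# names below are illustrative, not part of this module):
#
#   from scipy.io import wavfile
#   from bark.generation import (
#       preload_models, generate_text_semantic, generate_coarse, generate_fine,
#       codec_decode, SAMPLE_RATE,
#   )
#
#   preload_models()
#   semantic = generate_text_semantic("Hello world", history_prompt=None)
#   coarse = generate_coarse(semantic, history_prompt=None)
#   fine = generate_fine(coarse, history_prompt=None)
#   audio = codec_decode(fine)  # float waveform at SAMPLE_RATE (24 kHz)
#   wavfile.write("out.wav", SAMPLE_RATE, audio)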