{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n",
"os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"7\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"# import libs\n",
"import torch\n",
"import torchaudio\n",
"\n",
"from data.tokenizer import (\n",
" AudioTokenizer,\n",
" TextTokenizer,\n",
")\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Setting up corpus information\u001b[33m...\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Loading corpus from source files\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/100 \u001b[0m [ \u001b[33m0:00:01\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[?25h"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Found \u001b[1;36m1\u001b[0m speaker across \u001b[1;36m1\u001b[0m file, average number of utterances per \n",
"\u001b[2;36m \u001b[0m speaker: \u001b[1;36m1.0\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Initializing multiprocessing jobs\u001b[33m...\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Normalizing text\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:01\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[?25h"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Creating corpus split for feature generation\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/2 \u001b[0m [ \u001b[33m0:00:01\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Generating MFCCs\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:01\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Calculating CMVN\u001b[33m...\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Generating final features\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[?25h"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Creating corpus split with features\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:01\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[?25h"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Compiling training graphs\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Performing first-pass alignment\u001b[33m...\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Generating alignments\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Calculating fMLLR for speaker adaptation\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Performing second-pass alignment\u001b[33m...\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Generating alignments\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Collecting phone and word alignments from alignment lattices\u001b[33m...\u001b[0m \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 100%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1/1 \u001b[0m [ \u001b[33m0:00:01\u001b[0m < \u001b[36m0:00:00\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[?25h"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[2;36m \u001b[0m\u001b[33mWARNING \u001b[0m Alignment analysis not available without using postgresql \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Exporting alignment TextGrids to demo/temp/mfa_alignments\u001b[33m...\u001b[0m \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Finished exporting TextGrids to demo/temp/mfa_alignments! \n",
"\u001b[2;36m \u001b[0m\u001b[32mINFO \u001b[0m Done! Everything took \u001b[1;36m40.634\u001b[0m seconds \n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2K\u001b[35m 0%\u001b[0m \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0/1 \u001b[0m [ \u001b[33m0:00:00\u001b[0m < \u001b[36m-:--:--\u001b[0m , \u001b[31m? it/s\u001b[0m ]\n",
"\u001b[?25h"
]
}
],
"source": [
"# hyperparameters for inference\n",
"left_margin = 0.08 # not used for TTS, only for speech editing\n",
"right_margin = 0.08 # not used for TTS, only for speech editing\n",
"codec_audio_sr = 16000\n",
"codec_sr = 50\n",
"top_k = 0\n",
"top_p = 0.8\n",
"temperature = 1\n",
"kvcache = 1\n",
"silence_tokens=[1388,1898,131]\n",
"# adjust the below three arguments if the generation is not as good\n",
"seed = 1 # random seed magic\n",
"stop_repetition = 3 # if there are long silence in the generated audio, reduce the stop_repetition to 3, 2 or even 1\n",
"sample_batch_size = 4 # if there are long silence or unnaturally stretched words, increase sample_batch_size to 2, 3 or even 4\n",
"# what this will do to the model is that the model will run sample_batch_size examples of the same audio, and pick the one that's the shortest\n",
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
"\n",
"# point to the original file or record the file\n",
"# write down the transcript for the file, or run whisper to get the transcript (and you can modify it if it's not accurate), save it as a .txt file\n",
"orig_audio = \"./demo/84_121550_000074_000000.wav\"\n",
"orig_transcript = \"But when I had approached so near to them The common object, which the sense deceives, Lost not by distance any of its marks,\"\n",
"\n",
"# move the audio and transcript to temp folder\n",
"temp_folder = \"./demo/temp\"\n",
"os.makedirs(temp_folder, exist_ok=True)\n",
"os.system(f\"cp {orig_audio} {temp_folder}\")\n",
"filename = os.path.splitext(orig_audio.split(\"/\")[-1])[0]\n",
"with open(f\"{temp_folder}/{filename}.txt\", \"w\") as f:\n",
" f.write(orig_transcript)\n",
"# run MFA to get the alignment\n",
"align_temp = f\"{temp_folder}/mfa_alignments\"\n",
"os.makedirs(align_temp, exist_ok=True)\n",
"os.system(f\"mfa align -j 1 --output_format csv {temp_folder} english_us_arpa english_us_arpa {align_temp}\")\n",
"# if the above fails, it could be because the audio is too hard for the alignment model, increasing the beam size usually solves the issue\n",
"# os.system(f\"mfa align -j 1 --output_format csv {temp_folder} english_us_arpa english_us_arpa {align_temp} --beam 1000 --retry_beam 2000\")\n",
"audio_fn = f\"{temp_folder}/{filename}.wav\"\n",
"transcript_fn = f\"{temp_folder}/{filename}.txt\"\n",
"align_fn = f\"{align_temp}/{filename}.csv\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"name": "stdout",
"output_type": "stream",
"text": [
"concatenate prompt and generated:\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/wav;base64,UklGRqQ1BABXQVZFZm10IBAAAAABAAEAgD4AAAB9AAACABAAZGF0YYA1BADT/14AsACg/5T/ff+d/6H/W/9ZACcAvv9Q/2H/EwArAEoALADp/x8AOwA3AAgAIgARAN3/3//l//X/FgARAAMA6P/m/9z/yv8TAOv/0//C/6j/CgAkABAA///h//z/DwDa/6r/q//r/w4A7//O/+n/LwBOAFQAMgD1/+3/EAD9/w0AIwAZAP//IAAwABwAJQAjACcAJgAfADsASwAtADgAZQBpACwADgAKAAQAJgA+ADgANQBHAFkAWAAmACIARQBLAEEARAAcACAATABaAFQAMQAEAPH/2//c/1gAZAAwADcAJwDt/+7/MQAzABAA/f8hAPH/xv/n/wMAIgAPAAgA///k/87/0P8AAAAAAQDo/97/3P/L/+b/+v/2/87/5P/s////+//p//j/4f8AAAsACQAEAPv/6v/i//n/CQAQAAkAAgDy/+H/1P/j//L//v8KAAkAAgD//+T/zv/F/7f/vP/P/+n/6v/h//T/+P/S/+j/3P+x/5//m/+0/7D/yP+k/7f/uf+W/7L/wv/s/+X/y//K/87/4v/g/9j/3f/i//T/EwAhADAAKwDy//P/CAARAAgAAwAnACoACADr//3//P8QACYAPgBQADkAUgBuAGUAWABVAEsAYwB8AGEARwA5ACgALwAoAA8AKQAxACoAMgAkAAoA9v8YACcAHAAmABIAIQApACsALgAvADEAFwAyAD4ALAAsACgAHQAAABMAIAAbAB8AKgArAPf/7P/5/+7/5//7/xAABgAJAAQA6//3/+z/2v/d//D/CQD9//L/6//a/9P/5/8AAOj/3v/i/+7/9P/4/woACADw//b/HgAFAOH/w//H/9r/2f/Z/9L/0f/N/8f/x//U/7z/l/+P/5H/hv+D/4n/mP+W/47/if+B/3z/d/97/43/mP+q/8n/uf+m/5n/iv+s/6f/of+y/7//1v/j/+D/0f+//8H/2//L/7T/zP/P/9r/8v/X/9T/4f/p//b/3v/S/9f/1v/G/+H/FQAQABIAHAADAPb/9P/2//f/9/8EABMALwAuADQAKAAQAAoADQAfADwAQgAUABAAEgANAAgABwAHAAkADgD5/+b/6P/2//r/6v/n//n/AgDv/+v/6//n//L/6P/0//b/4f/e/9f/z//U/9n/5v/y//b//f/5//7/AQAAAAoAEgALAAoAGwAVABAAIgAvACoAQABYAFUAXABJAEIATQBYAFwAXgBlAGAAXgBiAFkAUQBIAFAAWwBbAGgAcAB5AJIApAC0ALIAtwDFAMUAyAC2AKIApwCqAKIAmwChAKwAsQCvAKMAmgCRAIQAhACTAKEAowCuAK8AnQCKAHUAegB8AH0AhACIAJIAnQCWAIgAhgCAAGwAbgBwAG4AdAB0AHkAfgB6AIMAeQB6AIYAiACBAHoAfQB9AHcAhACJAIoAhwCEAIIAcAB3AHkAiQB9AGcAdQB7AFMATgBfAFMAPQA1ACwAGQAaAAYA2v/Q/8f/1f/z//n/AADz/93/u/+n/5T/h/+G/5D/pP+2/7r/sv+u/6L/m/+e/5L/kf+I/33/eP90/3X/Zf9p/1b/P/9E/0T/R/9H/1P/Vv9h/1X/PP8x/yP/FP8D//H+2v7N/sf+wv7L/tD+2/7x/tf+1v7z/vz++/4G//z+Bf8Q/wb/HP8q/zX/Of82/0j/Uv93/5n/pf++/8P/x//H/+H/9f8AACMAKwA2ACAABgARACAANwBJAHAAnADVAPkA8wDtAOMA3QDEAJMAZQBWAEQAkQCLAIMAtwDNAN4AdABGAHwAeQBXAFoAzQChAFsAtQDCAH8AUACeAJEAGQD//zMAaQBwADwA8v9jALX/0/6x/zMAKv8u/xIA3v/x/0YAFAAVAO//AAAyAEIAdQB//47/AwDE/7f/+P
9WANn/CgAcALUAuQDg//kA0AEFAWMBXQJPAokB1AEOAoMBHQJUAs0BoQHmARECdALoAX4CVQO7AokCgALVAsAClQLSAR4AFgCSACcA/f+1/1D9Z/70/AX+Sf8dBJ7+yf0KCXr07AhhERn0lgHGJMMrGBm1GDEQ/hQ9H3YQKBJDF8YUVBBNEmARzgYRDowGqP+vAP/2sPo4ASL6I/KY9ajvp++M9UTtZ+218zL0MfSz+LH43/nCAKz/zv7JAgcCsACuAiX/Av21/6n+Afnt9yr4bPWV+bf3PPVS+iT3z/Sl+WP5uPct/Hz8w/tb/pb9hfsV/eH92fv7+1f8Lfuj/Lz8zPsz/PL7K/zd/LP9v/1z/lv+i/7h/pv/GACo/8j+Xf61/g//uv4O/hD9yfz1/UH++/wY/Af8p/vD+sX54fke+nD5ePie+On4Hfim94T3afe59tH1xvQ59BfzC/I68r/xcvC68Bfxg/A28CXv0e3E7Xnufe4S7gHuBO4g7qft+e3k7uXufvBK8iXzifMT9jr5KPyC/5EBwQQvCAYLMw44EWsTmxYHGoUbfh1XH1sf/B2yHIcaIhj2FfwSBBECDv0JLAaxAp/+lPqf98T1R/R68zfymfCk8ADww+5Y7Sftae0V743wM/EE8vLxM/JY8rnxevFB8uPyo/Mb9Ar0LvWV+Ir7uv1TAC8ElAnhDaAQwRM6F8MY1RnRGm8aUBv4HEQdNxyeGsAYoRf8FRASSRAND30MIQyTDBQNxw1dD5YQFRFdERESWxM/EyoTAxP3Ea4PIg0aCyII1QR3AXL90/nh9Z/wt+2G7LvqS+l76ZnqeeoJ6U/nneeG5anh2eAz4uThi9933lPdl9pv1o3TSdLw0vbX4t8n6RH0DgHsDN4XlSBmJhYsJzByMnk1HDnqOeU5CzlxNM8ryCHzF+oOswVZ/L73CvYl9Xr0r/RV9U73hPm5+qD9VwKPBsQK1Q9dFLYWRRcZF7wVgRJtDqEKrQYfA17/R/xQ+Wr2gvNA8bjvFu/r7wzx0POz92/7H/72AKYDcAYzCGYIcQgsCKAHbAc5Br0DywBq/tD7c/mS9Uvy1PCm8JfwT/Ee9EL4V/0WAnMG7AlnDmcUNRr1HQsgMiMOJmYlfyBYGscVoQ8yCLkB5/0y+xn5APh/+OH4zfkB+0b9SwDBAmgEcwdfCv8Jkwg7BwYE5/3R9j3v1Ogy4gjaBNMXzRjHIcA8usS2IrMVsROyjbe0vmfFsMsl3kD5wA/2G2IqHzw6RfpJtkVEQZg79jc3M20vZyfdF5UNmQNW83HeSNKgzBHM4swh0nbcaetw+WsECA8/Fe8aWSGYKWcrTyq9LAgw1i3SJJUaMhHSByX9+vFY6e3j9+Dn4P/jueXB5nPru/Iw+NL6EgBOB2MPjBX1GfccgR7bHUwaWBV0DY4FGwBV/dv54PTL8UnxkfIl8Xju9e2u7o3wMfNL9/H5Hf0kA1UIBAsqCrwIEQhLB2sEY/8P/OH60/qy+vP5o/mm+Tf66Ptm/HP8bv60BC4MGxATE1gXpBw9Hk4cwRnVFmUU0BCWDUkKoAaABXUGxQaSA0UAQgA9AXv/svol97L2T/XG8f3tzeqc5kvhztuj1AzN9cb+w17DgMJtvqu+w8gb1CXZAuQm/OsRWx8bKDE0hj7OPzE72DZuM6YpgCLSIJ8a9wrm/KP15Ox54IvTTM6B0fnWJdyT6HH4GwUiEEkdZyaKJ5EoQCtBLTgp3iHkHeocVRYKCowAVvrq8ILn++KY4NTe/eB66O3wsvY//HQEHA7RErQTMxYyGWIZnRdFFmcS5AxJB2ACs/sb8snpeuVV46Hf1dzj3gnj/OZh6rTuZvP49xT+SAQYCXIN6xKgGTMbTBiHFe0VpBJQCNf/FPwp+d7zRfDB7SrsQOw77U3w/fMq+Cz/pgkwEnsW/RuLIUQioiCSHxgftx1iGw4YoBSNEEAKpgQI/y75GvRy8cHwAPAX8XDzbPVj9kH14P
JK7qXp+eJb2u3T1c2Gy7vLxcnAxl/F0Mh2yu7MadHY3MzwowSPF/En6TU3PRFDoEMvO5k
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"generated:\n"
]
},
{
"data": {
"text/html": [
"\n",
" <audio controls=\"controls\" >\n",
" <source src=\"data:audio/wav;base64,UklGRiS8AgBXQVZFZm10IBAAAAABAAEAgD4AAAB9AAACABAAZGF0YQC8AgClAhkDaANo/t78J/vX/JL9hPyzAXYBOf13+mD87P+a/0kApQFiAbYBLQMIAsf9LQCAACz9jf75/kb/sP/q/gP/6f7m/379evsz/QT7uPlx+mv8EgHeAn8AKgCdAH3+Q/2q/GD+3wA7A8oEMQK9/3D+dv89/nH8JP8A/2cANwPMAe0BKQLoAN7/WgJjBX8CzgEZBIAC5f8g/U78k/+E/1wAWQMaBMQCawGx/6L+HP/c/7YBUQLoApECmgCf/RT74for+xv8Hv9WALf/q/9K/xT/Pv1r/VcAjgEHA2kFcQaAA/oBiwBg+qf64f2X/rz+Of+nA1QCVP/qAEUAOwGJAxgFmAfBBqcEWATPAZv9nPzK/Fv9HwApAZsAEgGQAfcAzwCgAWsE0AXTBgAIdwXLA/YBsf9//7MAdwCc/8gB6QCtAM3/fv7iALsAMgisDHQBYgM2EC8IefUt9Xv54vcS/I8AmQTLBtIFLgX5A1oEugSMBBwHFAf/Aaz+Mv6i+3/5ovii+Xv+QgO6ATH9m/5M/xH9N/+kAmwE7AZuCUkIfAaeAxf/pf2Q/Br7nfqk+Wj56PgO+Cv3gPa7+Kn6F/oA/JL+5f5QAGkB1QCVAUIBXQDv/xf+H/0l/Mv6v/p1+2H7ZPub/PD8Zf2X/u3+sv9FAacBIQJlAsEB2QF4AeYACwGgAGYAoADeANoAgf/O/rz/0/+Z/y4A3QBgAD0AbgCV/0j/CP+G/nj+jv4Z/kP9I/2D/W/9WP3M/Uz+GP5W/vX+F//y/iT+Of7E/iv+Zf10/Sz+HP7H/eX9jP4z/xD/bf8QAGcA1AAWAf8AwgBaANb/QwBnAPT/GgCdAGoApv91/2v/yP8NAPj/ugBkAsoC2wEQAoMBxP/b/6wAnQB9AHYB0gGYAOn/a//j/vv+F/+I/4MApQAeAGr/N/9f/6z+e/4X/+//rAC9AAkBTQH/AKEArgDpAIsA7P9bAMUA0f8M/0v/hv9t/0f/Yv+d/z0AbACR/00AhgGBAXsBgAGKAe0BhAH8AJAAPABvAHkAPwAWAC0A/f+s/9f/8P/b/1AAjABDAEsAJwCl/6D/jf8W//X+1P7b/tb+yv78/gP/Vv95/33/6v8OAOD/nv82/+z+bf5D/qv+jv5x/q7+i/7A/vz+Sf+X/3//tv9oAOcArwC0AAsBSgEJAWEAfQCmAFMAn/8m/3L/yP+F/zX/DQDWAJsAnQDoANcADgH9ANcAAgGLAMwAJAB7/2H/TP/j/u3+s/8R/07/u/86/zr/M//L/mT/xf///jL/iv8F//b+Mf+m/vr+Zf9H/y8A+P+m/xgA0v9+/+T/HwC5/5H/3v8eANT/Uf9P/0D/gf/u/4j/NwCeAIMAAwEgAUsBSwE0AS4BDwGzAEgAQAAvAO3/1//B/4f/sP+Z/4P/2f/m/9z/EAAaAAwAYABGABYAZgBLANr/+/8dALP/5f88ANT/9f93AAMAIgCsAHMAOAB8AIgAIAABAF4AUwBAAFwAgwD9ABwBZQHIAcIB9wEtAhkCfwLpAqcCUQJ8Ak4CtQHfAbkBWAG2AacBvAENAsYBlQGQAaMBXwGhAZsB0QAHAQMBbwCBAJIATAB7AAQBKgH5ALsA2wD1AOcArgBoAFkAQgAWANv/qv9c/23/iv9a/9n/PAD2/yEAewBuAFYAkgC0AAIBGQHPANIAAwHfAJUAwADgAAkBzQB+AOcA+QBjADoAmgB+AHwAtwCbAKsAlACGAJ4AdgDKAOwA2wAnASAB+ADoANQA7gC4AG4AYAAoAAoAvf9R/0X/X/8x/wP/Gf9c/wf/6f75/qf+Dv8J/+z+Y/9f/3z/pv9y/3f/S/8h/y//X/9l/73+uv4r/xf/7f63/nj+nP76/iX/V/+p/63/sP8/AP7/1/8+ACIAFQ
AfAA0A7//n/6L/1v/3/4//rv+L/1z/g/+g/2b/E/9d//T+Xf6p/l3+IP4T/qP9kf0F/hz+8/2P/qj+uP4R/6f+mP70/qX+bP6W/mL+Of4V/hn+zP2Q/YP9Vv0S/mT+EP6Y/p7+Sv62/p3+6/5Z/zP/i/9n/wz/BP8H/7v+uf5B/0H/Sv+d/5D/vP/K/5P/tf++/6H/if9q/2b/BP/F/tX+4f7u/g3/gP+f/6D/r/+o/+X/4v+1/+P/zP8XADwA3P/g/7D/Uv8X/xL/V/92/9f/LwAYAC4A9P/p/w8A+f/+/x8ANgBoALAAqQDUAOAA8wAhAekAGwGEAZIBmQHqAeABuAEBAu4BpAGXAZIBbgE+AUoBPAFCAT8BSQFrAVwBZwE0AQQBTwEhAbUAhABdAHcAWAAkAFgAcwAxAE8AdACsAH4AbQCJAEoA8P/U/ygAEAAhAIwAmADIABgB6wD8AGoBRgE1Ab4B2QHqAeUBuwHCAWsBSgEwASQBZwGAAZ8BxwHNAe8B9QG8AdABCwLXAbIB1wFoARQBWAHUAHMAxgDCAI8AuQDbAIMAegCrALQAYgAmAC8A7f9q/+X+q/6v/un+F/9U/7//2P+z/1z/eP+U/8n/BwBDALEAagBEABAAqP98/6H/2f/k/yEAbgBDACkAeABqAPX/OAC/AI8AVgCfACcAmf+j/3L/rf+t/1D/kv/v/7n/GAArAM//4P+8/17/5P4R/8T+Hf5i/j/+7f0G/p39zv3H/XH90/20/df9V/5b/vX99v1W/uD9wP0E/tn97f1Z/oX+av6a/sX+yP7c/vf+V/+E/7H/PgCQAGYANQAuAEsAYgCKAIEAgwCyAMIALwEjAREBTgFrAYABbwFWAQwBggAPAOn/9v8kAOL/Xv/L/+//7f7K/rj+rv7n/vT+3f60/oz+X/4x/hr+i/5U/k/+fP49/n/+tP6J/or+1P4Y/wr/MP9o/1D/oP+y/3X/pP8AAJ//fv82ALMA9QChAJAA7gAFAQABAwG+ARYCAAIEAhkC3wGgAaUBmAH4AUMCBwKHAecBxwEwAS8B1ACtAOAAugBlAF8AHgAzADIA1v8BANH/hv+8/63/OP9O/13/Bv/q/q7+wv7m/vb+0v7e/hH/Bf96/7L/df9N/7D/o/81/6H/2v/o/00AiACsAPMAIAFCAXABigHbAQsCPQKPAqECjgLAAr0CTAICAgwCMwITAtkBBQLmAakBpAF7AS4BKgEZAeUA4wDAAFAAGwAUANb/f/8y/2n/gv9o/0b/bP9r//r+Y/9q//X++f5+/9H/Xv9r/3//3/8nAPX/7v/6/1cAegDZACQB1QC0AMEA3gDXAKQAnQAYAVgBEQEWAQYBlgBxAJIAHAD0/w0A4P/W/+r/xf/E/+X/l/+V/4v/H/87/zz/3P6T/pX+ff51/of+H/7j/QT+Ov7+/RP+Xv4D/un95/3Y/bP9vv30/Rv+P/5l/rr+v/4+/lz++f4l/yf/2P7v/k7/dP9m/2X/vf/o/wYAIgAWAAAABgDj/zkAxQB2AKIA1wCtAK8AKADb/w0ANQA+AHMAWwD//9X/hv+K/4b/Uf9j/2n/Dv/s/uf+pf6J/oz+iv6M/vX+MP8M//7+N/8p/7z+pP65/rf+mv6l/gb/3P6//h7/ev+i/8n/FwDN/xMAewA+AEoAewB9AHsA1ACsAHkAowDCAB8BfgGTAZwBXAE1AXsBjAHpANoABgHaAMQAoQCxAIUAdABiABYA1/8DAOf/sf+//9j/7v/K/9n/yv+1/6z/g/+L/5r/Xf9R/3T/sP/C/9//HwD+/y0AcQAqAB0ASgBqAMMA4AAGAQABGgH1AJ4A7AD/AOUARgGfAXwBTwE1ATgBUAFWAWYBZQFiAWYBjgHbAZwBTgFnAW8BMwEYAVUBDwHqACQBCgEJAfIAvACtAIwAmgCdAH8AlQBQAAYA8//M/9T/0f/C/7H/nf9e/2X/0P8jAEMAJQBjAIMASgA7AC4APw
B7AJoAYAA5AEMALQA5ADQAMgBXAGcAmgC/AOsADAEIATQBPwEIAfAA0gC/AKUAegBIACY
" Your browser does not support the audio element.\n",
" </audio>\n",
" "
],
"text/plain": [
"<IPython.lib.display.Audio object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# take a look at demo/temp/mfa_alignment, decide which part of the audio to use as prompt\n",
"cut_off_sec = 3.01 # NOTE: according to forced-alignment file, the word \"common\" stops at 3.01 sec, this should be different for different audio\n",
"target_transcript = \"But when I had approached so near to them The common I cannot believe that the same model can also do text to speech synthesis as well!\"\n",
"info = torchaudio.info(audio_fn)\n",
"audio_dur = info.num_frames / info.sample_rate\n",
"\n",
"assert cut_off_sec < audio_dur, f\"cut_off_sec {cut_off_sec} is larger than the audio duration {audio_dur}\"\n",
"prompt_end_frame = int(cut_off_sec * info.sample_rate)\n",
"\n",
"\n",
"# # load model, tokenizer, and other necessary files\n",
"from models import voicecraft\n",
"voicecraft_name=\"giga830M.pth\"\n",
"ckpt_fn =f\"./pretrained_models/{voicecraft_name}\"\n",
"encodec_fn = \"./pretrained_models/encodec_4cb2048_giga.th\"\n",
"if not os.path.exists(ckpt_fn):\n",
" os.system(f\"wget https://huggingface.co/pyp1/VoiceCraft/resolve/main/{voicecraft_name}\\?download\\=true\")\n",
" os.system(f\"mv {voicecraft_name}\\?download\\=true ./pretrained_models/{voicecraft_name}\")\n",
"if not os.path.exists(encodec_fn):\n",
" os.system(f\"wget https://huggingface.co/pyp1/VoiceCraft/resolve/main/encodec_4cb2048_giga.th\")\n",
" os.system(f\"mv encodec_4cb2048_giga.th ./pretrained_models/encodec_4cb2048_giga.th\")\n",
"\n",
"ckpt = torch.load(ckpt_fn, map_location=\"cpu\")\n",
"model = voicecraft.VoiceCraft(ckpt[\"config\"])\n",
"model.load_state_dict(ckpt[\"model\"])\n",
"model.to(device)\n",
"model.eval()\n",
"\n",
"phn2num = ckpt['phn2num']\n",
"\n",
"text_tokenizer = TextTokenizer(backend=\"espeak\")\n",
"audio_tokenizer = AudioTokenizer(signature=encodec_fn) # will also put the neural codec model on gpu\n",
"\n",
"# run the model to get the output\n",
"decode_config = {'top_k': top_k, 'top_p': top_p, 'temperature': temperature, 'stop_repetition': stop_repetition, 'kvcache': kvcache, \"codec_audio_sr\": codec_audio_sr, \"codec_sr\": codec_sr, \"silence_tokens\": silence_tokens, \"sample_batch_size\": sample_batch_size}\n",
"from inference_tts_scale import inference_one_sample\n",
"concated_audio, gen_audio = inference_one_sample(model, ckpt[\"config\"], phn2num, text_tokenizer, audio_tokenizer, audio_fn, target_transcript, device, decode_config, prompt_end_frame)\n",
" \n",
"# save segments for comparison\n",
"concated_audio, gen_audio = concated_audio[0].cpu(), gen_audio[0].cpu()\n",
"# logging.info(f\"length of the resynthesize orig audio: {orig_audio.shape}\")\n",
"\n",
"\n",
"# display the audio\n",
"from IPython.display import Audio\n",
"print(\"concatenate prompt and generated:\")\n",
"display(Audio(concated_audio, rate=codec_audio_sr))\n",
"\n",
"print(\"generated:\")\n",
"display(Audio(gen_audio, rate=codec_audio_sr))\n",
"\n",
"# # save the audio\n",
"# # output_dir\n",
"# output_dir = \"/home/pyp/VoiceCraft/demo/generated_tts\"\n",
"# os.makedirs(output_dir, exist_ok=True)\n",
"# seg_save_fn_gen = f\"{output_dir}/{os.path.basename(audio_fn)[:-4]}_gen_seed{seed}.wav\"\n",
"# seg_save_fn_concat = f\"{output_dir}/{os.path.basename(audio_fn)[:-4]}_concat_seed{seed}.wav\" \n",
2024-03-21 11:02:20 -07:00
"\n",
2024-03-28 16:21:30 -07:00
"# torchaudio.save(seg_save_fn_gen, gen_audio, codec_audio_sr)\n",
"# torchaudio.save(seg_save_fn_concat, concated_audio, codec_audio_sr)\n",
2024-03-21 11:02:20 -07:00
"\n",
"# if you get error importing T5 in transformers\n",
"# try \n",
"# pip uninstall Pillow\n",
"# pip install Pillow\n",
"# you might get warnings like WARNING:phonemizer:words count mismatch on 300.0% of the lines (3/1), this can be safely ignored"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "voicecraft",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.18"
}
},
"nbformat": 4,
"nbformat_minor": 2
}