diff --git a/clone_voice.ipynb b/clone_voice.ipynb
index c25f7e3..7c687bc 100644
--- a/clone_voice.ipynb
+++ b/clone_voice.ipynb
@@ -9,13 +9,10 @@
     "from bark.generation import load_codec_model, generate_text_semantic\n",
     "from encodec.utils import convert_audio\n",
     "\n",
-    "from transformers import BertTokenizer\n",
-    "\n",
     "import torchaudio\n",
     "import torch\n",
     "\n",
-    "model = load_codec_model(use_gpu=True)\n",
-    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-multilingual-cased\")"
+    "model = load_codec_model(use_gpu=True)"
    ]
   },
   {
@@ -119,9 +116,15 @@
    "outputs": [],
    "source": [
     "from bark.api import generate_audio\n",
+    "from transformers import BertTokenizer\n",
     "from bark.generation import SAMPLE_RATE, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic\n",
+    "\n",
+    "# Enter your prompt and speaker here\n",
     "text_prompt = \"Hello, my name is Serpy. And, uh — and I like pizza. [laughs]\"\n",
-    "voice_name = \"speaker_0\" # use your custom voice name here if you have one"
+    "voice_name = \"speaker_0\" # use your custom voice name here if you have one\n",
+    "\n",
+    "# load the tokenizer\n",
+    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-multilingual-cased\")"
    ]
   },
   {
diff --git a/generate.ipynb b/generate.ipynb
index e124459..174d7f8 100644
--- a/generate.ipynb
+++ b/generate.ipynb
@@ -7,9 +7,15 @@
    "outputs": [],
    "source": [
     "from bark.api import generate_audio\n",
+    "from transformers import BertTokenizer\n",
     "from bark.generation import SAMPLE_RATE, preload_models, codec_decode, generate_coarse, generate_fine, generate_text_semantic\n",
+    "\n",
+    "# Enter your prompt and speaker here\n",
     "text_prompt = \"Hello, my name is Serpy. And, uh — and I like pizza. [laughs]\"\n",
-    "voice_name = \"speaker_0\" # use your custom voice name here if you have one"
+    "voice_name = \"speaker_0\" # use your custom voice name here if you have one\n",
+    "\n",
+    "# load the tokenizer\n",
+    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-multilingual-cased\")"
    ]
   },
   {
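
For context, the cell these hunks edit would run roughly as sketched below once the notebook is executed. This is a minimal sketch, not part of the patch: the `preload_models()` call, the `history_prompt=voice_name` argument, and the `write_wav` output step are assumptions based on the upstream Bark API and the rest of the notebook, which this diff does not touch.

```python
# Sketch of the generation cell after this patch (assumptions noted above).
from bark.api import generate_audio
from transformers import BertTokenizer
from bark.generation import SAMPLE_RATE, preload_models
from scipy.io.wavfile import write as write_wav

# Enter your prompt and speaker here (values taken verbatim from the diff)
text_prompt = "Hello, my name is Serpy. And, uh — and I like pizza. [laughs]"
voice_name = "speaker_0"  # use your custom voice name here if you have one

# load the tokenizer (now done here instead of in the cloning cell)
tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased")

# Assumed: load the Bark models before generating
preload_models()

# Assumed: the cloned voice is passed as the history prompt
audio_array = generate_audio(text_prompt, history_prompt=voice_name)

# Write the result as a WAV file at Bark's sample rate (24 kHz)
write_wav("output.wav", SAMPLE_RATE, audio_array)
```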