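"""Smoke test for AlignTTS training.

Trains for one epoch on the bundled LJSpeech sample, runs inference with the
resulting checkpoint via the `tts` CLI, then restores the run and continues
training for one more epoch before cleaning up.
"""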
import glob
import json
import os
import shutil

from trainer import get_last_checkpoint

from tests import get_device_id, get_tests_output_path, run_cli
from TTS.tts.configs.align_tts_config import AlignTTSConfig

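# Paths for the generated test config and the training run outputs.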
config_path = os.path.join(get_tests_output_path(), "test_model_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")

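# Minimal AlignTTS config for a fast smoke test: tiny batches, a single epoch,
# character-based input (phonemes disabled), and one test sentence.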
config = AlignTTSConfig(
    batch_size=8,
    eval_batch_size=8,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    text_cleaner="english_cleaners",
    use_phonemes=False,
    phoneme_language="en-us",
    phoneme_cache_path=os.path.join(get_tests_output_path(), "train_outputs/phoneme_cache/"),
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    print_step=1,
    print_eval=True,
    test_sentences=[
        "Be a voice, not an echo.",
    ],
)
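# Trim leading/trailing silence from the training audio; trim_db sets the
# loudness threshold used to decide what counts as silence.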
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)

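# The --coqpit.* flags override fields of the saved config from the command
# line, pointing the dataset at the small LJSpeech sample under tests/data.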
# train the model for one epoch
command_train = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --config_path {config_path} "
    f"--coqpit.output_path {output_path} "
    "--coqpit.datasets.0.formatter ljspeech "
    "--coqpit.datasets.0.meta_file_train metadata.csv "
    "--coqpit.datasets.0.meta_file_val metadata.csv "
    "--coqpit.datasets.0.path tests/data/ljspeech "
    "--coqpit.test_delay_epochs 0 "
)
# run_cli executes the command in a shell and fails the test on a nonzero exit code
run_cli(command_train)

# Find the latest run folder (each training run writes into its own subdirectory of output_path)
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)

# Collect the saved config, the last checkpoint, and an output path for inference
continue_config_path = os.path.join(continue_path, "config.json")
continue_restore_path, _ = get_last_checkpoint(continue_path)
out_wav_path = os.path.join(get_tests_output_path(), "output.wav")

# Check integrity of the config
with open(continue_config_path, "r", encoding="utf-8") as f:
    config_loaded = json.load(f)
assert config_loaded["characters"] is not None
assert config_loaded["output_path"] in continue_path
assert config_loaded["test_delay_epochs"] == 0

# Load the model and run inference
inference_command = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' tts --text 'This is an example.' "
    f"--config_path {continue_config_path} --model_path {continue_restore_path} --out_path {out_wav_path}"
)
run_cli(inference_command)

# restore the model and continue training for one more epoch
command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_tts.py --continue_path {continue_path} "
run_cli(command_train)

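# Remove the run directory so repeated test runs start from a clean state.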
shutil.rmtree(continue_path)