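"""Flask views for the Voice Cloning App.

Routes cover dataset creation, training, synthesis, import/export and settings.
"""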
import os
import sys
import inflect
import io
import zipfile
import traceback

import torch

sys.path.append("synthesis/waveglow/")

from main import app, paths
from application.utils import (
    start_progress_thread,
    get_next_url,
    get_suffix,
    delete_folder,
    import_dataset,
)
from dataset.create_dataset import create_dataset
from dataset.clip_generator import CHARACTER_ENCODING
from dataset.extend_existing_dataset import extend_existing_dataset
from dataset.analysis import get_total_audio_duration, validate_dataset
from dataset.transcribe import create_transcription_model
from training.train import train, DEFAULT_ALPHABET
from training.utils import get_available_memory, get_batch_size, load_symbols
from synthesis.synthesize import load_model, synthesize
from synthesis.vocoders import Waveglow, Hifigan

from flask import redirect, render_template, request, send_file

URLS = {"/": "Build dataset", "/train": "Train", "/synthesis-setup": "Synthesis"}
TEXT_FILE = "text.txt"
ALIGNMENT_FILE = "align.json"
AUDIO_FOLDER = "wavs"
METADATA_FILE = "metadata.csv"
INFO_FILE = "info.json"
CHECKPOINT_FOLDER = "checkpoints"
GRAPH_FILE = "graph.png"
RESULTS_FILE = "out.wav"
TEMP_DATASET_UPLOAD = "temp.zip"
TRANSCRIPTION_MODEL = "model.pbmm"
ALPHABET_FILE = "alphabet.txt"
ENGLISH_LANGUAGE = "English"

model = None
vocoder = None
inflect_engine = inflect.engine()
symbols = None


def get_languages():
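    """Return the selectable languages: English plus any uploaded languages."""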
    return [ENGLISH_LANGUAGE] + os.listdir(paths["languages"])


def get_checkpoints():
    # Checkpoints ordered by name (i.e. checkpoint_0, checkpoint_1000 etc.)
    return {
        model: sorted(
            os.listdir(os.path.join(paths["models"], model)),
            key=lambda name: int(name.split("_")[1]) if "_" in name and name.split("_")[1].isdigit() else 0,
            reverse=True,
        )
        for model in os.listdir(paths["models"])
        if os.listdir(os.path.join(paths["models"], model))
    }


@app.errorhandler(Exception)
def handle_bad_request(e):
    error = {"type": e.__class__.__name__, "text": str(e), "stacktrace": traceback.format_exc()}
    return render_template("error.html", error=error)


@app.context_processor
def inject_data():
    return {"urls": URLS, "path": request.path}


# Dataset
@app.route("/", methods=["GET"])
def get_create_dataset():
    return render_template("index.html", datasets=os.listdir(paths["datasets"]), languages=get_languages())


@app.route("/datasource", methods=["GET"])
def get_datasource():
    return render_template("datasource.html")


@app.route("/", methods=["POST"])
def create_dataset_post():
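    """Build a new dataset from the uploaded text/audio pair, or extend an existing one, in a background thread."""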
    min_confidence = float(request.form["confidence"])
    language = request.form["language"]
    transcription_model_path = (
        os.path.join(paths["languages"], language, TRANSCRIPTION_MODEL) if language != ENGLISH_LANGUAGE else None
    )
    transcription_model = create_transcription_model(transcription_model_path)

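    # A non-empty "name" means a brand new dataset; otherwise the upload extends the selected existing dataset.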
    if request.form["name"]:
        output_folder = os.path.join(paths["datasets"], request.form["name"])
        if os.path.exists(output_folder):
            request.files = None
            raise Exception("Dataset name taken")
        os.makedirs(output_folder, exist_ok=True)
        text_path = os.path.join(output_folder, TEXT_FILE)
        audio_path = os.path.join(output_folder, request.files["audio_file"].filename)
        forced_alignment_path = os.path.join(output_folder, ALIGNMENT_FILE)
        output_path = os.path.join(output_folder, AUDIO_FOLDER)
        label_path = os.path.join(output_folder, METADATA_FILE)
        info_path = os.path.join(output_folder, INFO_FILE)

        with open(text_path, "w", encoding=CHARACTER_ENCODING) as f:
            f.write(request.files["text_file"].read().decode(CHARACTER_ENCODING, "ignore").replace("\r\n", "\n"))
        request.files["audio_file"].save(audio_path)
        start_progress_thread(
            create_dataset,
            text_path=text_path,
            audio_path=audio_path,
            transcription_model=transcription_model,
            forced_alignment_path=forced_alignment_path,
            output_path=output_path,
            label_path=label_path,
            info_path=info_path,
            min_confidence=min_confidence,
        )
    else:
        output_folder = os.path.join(paths["datasets"], request.form["dataset"])
        suffix = get_suffix()
        text_path = os.path.join(output_folder, f"text-{suffix}.txt")
        audio_path = os.path.join(output_folder, f"audio-{suffix}.mp3")
        forced_alignment_path = os.path.join(output_folder, f"align-{suffix}.json")
        info_path = os.path.join(output_folder, INFO_FILE)

        with open(text_path, "w", encoding=CHARACTER_ENCODING) as f:
            f.write(request.files["text_file"].read().decode(CHARACTER_ENCODING, "ignore").replace("\r\n", "\n"))
        request.files["audio_file"].save(audio_path)
        existing_output_path = os.path.join(output_folder, AUDIO_FOLDER)
        existing_label_path = os.path.join(output_folder, METADATA_FILE)
        start_progress_thread(
            extend_existing_dataset,
            text_path=text_path,
            audio_path=audio_path,
            transcription_model=transcription_model,
            forced_alignment_path=forced_alignment_path,
            output_path=existing_output_path,
            label_path=existing_label_path,
            suffix=suffix,
            info_path=info_path,
            min_confidence=min_confidence,
        )
    return render_template("progress.html", next_url=get_next_url(URLS, request.path))


@app.route("/dataset-duration", methods=["GET"])
def get_dataset_duration():
    dataset = request.values["dataset"]
    dataset_error = validate_dataset(
        os.path.join(paths["datasets"], dataset), metadata_file=METADATA_FILE, audio_folder=AUDIO_FOLDER
    )
    if not dataset_error:
        duration, total_clips = get_total_audio_duration(os.path.join(paths["datasets"], dataset, INFO_FILE))
        return {"duration": duration, "total_clips": total_clips}
    else:
        return {"error": dataset_error}


# Training
@app.route("/train", methods=["GET"])
def get_train():
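    """Render the training page, suggesting a batch size when CUDA is available."""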
    cuda_enabled = torch.cuda.is_available()
    if cuda_enabled:
        available_memory_gb = get_available_memory()
        batch_size = get_batch_size(available_memory_gb)
    else:
        batch_size = None
    return render_template(
        "train.html",
        cuda_enabled=cuda_enabled,
        batch_size=batch_size,
        datasets=os.listdir(paths["datasets"]),
        checkpoints=get_checkpoints(),
        languages=get_languages(),
    )


@app.route("/train", methods=["POST"])
def train_post():
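    """Collect the training form options and start training in a background thread."""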
    language = request.form["language"]
    alphabet_path = os.path.join(paths["languages"], language, ALPHABET_FILE) if language != ENGLISH_LANGUAGE else None
    dataset_name = request.form["dataset"]
    epochs = request.form["epochs"]
    batch_size = request.form["batch_size"]
    early_stopping = request.form.get("early_stopping") is not None
    iters_per_checkpoint = request.form["checkpoint_frequency"]
    train_size = 1 - float(request.form["validation_size"])
    overwrite_checkpoints = request.form.get("overwrite_checkpoints") is not None
    multi_gpu = request.form.get("multi_gpu") is not None
    checkpoint_path = (
        os.path.join(paths["models"], dataset_name, request.form["checkpoint"])
        if request.form.get("checkpoint")
        else None
    )

    metadata_path = os.path.join(paths["datasets"], dataset_name, METADATA_FILE)
    audio_folder = os.path.join(paths["datasets"], dataset_name, AUDIO_FOLDER)
    checkpoint_folder = os.path.join(paths["models"], dataset_name)
    pretrained_folder = os.path.join(paths["pretrained"], dataset_name)

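    # An uploaded pretrained model is saved and used as the transfer-learning starting point.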
    if request.files.get("pretrained_model"):
        os.makedirs(pretrained_folder, exist_ok=True)
        transfer_learning_path = os.path.join(pretrained_folder, "pretrained.pt")
        request.files["pretrained_model"].save(transfer_learning_path)
    else:
        transfer_learning_path = None

    start_progress_thread(
        train,
        metadata_path=metadata_path,
        dataset_directory=audio_folder,
        output_directory=checkpoint_folder,
        alphabet_path=alphabet_path,
        checkpoint_path=checkpoint_path,
        transfer_learning_path=transfer_learning_path,
        epochs=int(epochs),
        batch_size=int(batch_size),
        early_stopping=early_stopping,
        multi_gpu=multi_gpu,
        overwrite_checkpoints=overwrite_checkpoints,
        iters_per_checkpoint=int(iters_per_checkpoint),
        train_size=train_size,
    )
    return render_template("progress.html", next_url=get_next_url(URLS, request.path))


# Synthesis
@app.route("/synthesis-setup", methods=["GET"])
def get_synthesis_setup():
    return render_template(
        "synthesis-setup.html",
        waveglow_models=os.listdir(paths["waveglow"]),
        hifigan_models=os.listdir(paths["hifigan"]),
        models=os.listdir(paths["models"]),
        checkpoints=get_checkpoints(),
        languages=get_languages(),
    )


@app.route("/synthesis-setup", methods=["POST"])
def synthesis_setup_post():
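    """Load the chosen vocoder, symbol set and voice model checkpoint, then move on to synthesis."""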
    global model, vocoder, symbols
    vocoder_type = request.form["vocoder_type"]
    if vocoder_type == "hifigan":
        hifigan_folder = os.path.join(paths["hifigan"], request.form["vocoder"])
        model_path = os.path.join(hifigan_folder, "model.pt")
        model_config_path = os.path.join(hifigan_folder, "config.json")
        vocoder = Hifigan(model_path, model_config_path)
    elif vocoder_type == "waveglow":
        model_path = os.path.join(paths["waveglow"], request.form["vocoder"])
        vocoder = Waveglow(model_path)
    else:
        return render_template("synthesis-setup.html", error="Invalid vocoder selected")

    dataset_name = request.form["model"]
    language = request.form["language"]
    alphabet_path = os.path.join(paths["languages"], language, ALPHABET_FILE)
    symbols = load_symbols(alphabet_path) if language != ENGLISH_LANGUAGE else DEFAULT_ALPHABET
    checkpoint_folder = os.path.join(paths["models"], dataset_name)
    checkpoint = os.path.join(checkpoint_folder, request.form["checkpoint"])
    model = load_model(checkpoint)
    return redirect("/synthesis")


@app.route("/data/results/<path:path>")
def get_result_file(path):
    filename = path.split("/")[-1]
    mimetype = "image/png" if filename.endswith("png") else "audio/wav"
    with open(os.path.join(paths["results"], path), "rb") as f:
        return send_file(io.BytesIO(f.read()), attachment_filename=filename, mimetype=mimetype, as_attachment=True)


@app.route("/synthesis", methods=["GET", "POST"])
def synthesis_post():
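    """Synthesise speech for the submitted text with the previously loaded model and vocoder."""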
    global model, vocoder, symbols
    if not model or not vocoder or not symbols:
        return redirect("/synthesis-setup")

    if request.method == "GET":
        return render_template("synthesis.html")
    else:
        text = request.form["text"]
        folder_name = get_suffix()
        results_folder = os.path.join(paths["results"], folder_name)
        os.makedirs(results_folder)
        graph_path = os.path.join(results_folder, GRAPH_FILE)
        audio_path = os.path.join(results_folder, RESULTS_FILE)
        graph_web_path = graph_path.replace("\\", "/")
        audio_web_path = audio_path.replace("\\", "/")
        silence = float(request.form["silence"])
        max_decoder_steps = int(request.form["max_decoder_steps"])
        synthesize(
            model,
            text,
            inflect_engine,
            symbols,
            graph_path,
            audio_path,
            vocoder,
            silence,
            max_decoder_steps=max_decoder_steps,
        )
        return render_template(
            "synthesis.html",
            text=text.strip(),
            graph=graph_web_path,
            audio=audio_web_path,
            silence=silence,
            max_decoder_steps=max_decoder_steps,
        )


# Import-export
@app.route("/import-export", methods=["GET"])
def import_export():
    return render_template(
        "import-export.html",
        datasets=os.listdir(paths["datasets"]),
        models=os.listdir(paths["models"]),
        checkpoints=get_checkpoints(),
    )


@app.route("/upload-dataset", methods=["POST"])
def upload_dataset():
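    """Import an uploaded dataset zip into the datasets folder in a background thread."""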
    dataset = request.files["dataset"]
    dataset.save(TEMP_DATASET_UPLOAD)
    dataset_name = request.values["name"]
    dataset_directory = os.path.join(paths["datasets"], dataset_name)
    audio_folder = os.path.join(dataset_directory, AUDIO_FOLDER)
    assert not os.path.isdir(dataset_directory), "Output folder already exists"

    start_progress_thread(
        import_dataset, dataset=TEMP_DATASET_UPLOAD, dataset_directory=dataset_directory, audio_folder=audio_folder
    )

    return render_template("progress.html", next_url="/import-export")


@app.route("/download-dataset", methods=["POST"])
def download_dataset():
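    """Send the selected dataset (metadata, info and audio clips) as a zip download."""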
    dataset_name = request.values["dataset"]
    dataset_directory = os.path.join(paths["datasets"], dataset_name)
    data = io.BytesIO()
    with zipfile.ZipFile(data, mode="w") as z:
        z.write(os.path.join(dataset_directory, METADATA_FILE), METADATA_FILE)
        if os.path.isfile(os.path.join(dataset_directory, INFO_FILE)):
            z.write(os.path.join(dataset_directory, INFO_FILE), INFO_FILE)
        audio_directory = os.path.join(dataset_directory, AUDIO_FOLDER)
        for audiofile in os.listdir(audio_directory):
            z.write(os.path.join(audio_directory, audiofile), os.path.join(AUDIO_FOLDER, audiofile))
    data.seek(0)
    return send_file(
        data,
        mimetype="application/zip",
        as_attachment=True,
        attachment_filename=f'{dataset_name.replace(" ", "_")}.zip',
    )


@app.route("/upload-model", methods=["POST"])
def upload_model():
    model_name = request.values["name"]
    model_directory = os.path.join(paths["models"], model_name)
    os.makedirs(model_directory, exist_ok=False)
    model_path = os.path.join(model_directory, "model.pt")
    request.files["model_upload"].save(model_path)
    return render_template("import-export.html", message=f"Successfully uploaded {model_name} model")


@app.route("/download-model", methods=["POST"])
def download_model():
    model_name = request.values["model"]
    model_path = os.path.join(paths["models"], model_name, request.values["checkpoint"])

    return send_file(model_path, as_attachment=True, attachment_filename=request.values["checkpoint"])


# Settings
@app.route("/settings", methods=["GET"])
def get_settings():
    return render_template(
        "settings.html",
        datasets=os.listdir(paths["datasets"]),
        models=os.listdir(paths["models"]),
    )


@app.route("/delete-dataset", methods=["POST"])
def delete_dataset_post():
    delete_folder(os.path.join(paths["datasets"], request.values["dataset"]))
    return redirect("/settings")


@app.route("/delete-model", methods=["POST"])
def delete_model_post():
    delete_folder(os.path.join(paths["models"], request.values["model"]))
    return redirect("/settings")


@app.route("/upload-language", methods=["POST"])
def upload_language():
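    """Add a language by saving its transcription model and alphabet file."""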
    language = request.values["name"]
    language_dir = os.path.join(paths["languages"], language)
    os.makedirs(language_dir, exist_ok=True)
    request.files["model"].save(os.path.join(language_dir, TRANSCRIPTION_MODEL))
    request.files["alphabet"].save(os.path.join(language_dir, ALPHABET_FILE))
    return redirect("/settings")


@app.route("/add-vocoder", methods=["POST"])
def add_vocoder():
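    """Save an uploaded HiFi-GAN (model + config) or WaveGlow vocoder."""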
    vocoder_type = request.values["vocoder"]
    name = request.values["name"]
    if vocoder_type == "hifigan":
        hifigan_folder = os.path.join(paths["hifigan"], name)
        os.makedirs(hifigan_folder)
        model_path = os.path.join(hifigan_folder, "model.pt")
        model_config_path = os.path.join(hifigan_folder, "config.json")
        request.files["hifigan-model"].save(model_path)
        request.files["hifigan-config"].save(model_config_path)
    else:
        model_path = os.path.join(paths["waveglow"], name + ".pt")
        request.files["waveglow"].save(model_path)
    return redirect("/settings")