TTS/tests/vocoder_tests/test_wavernn_train.py
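# Smoke test: train a WaveRNN vocoder for one epoch, then restore the run and
# continue training for one more epoch before cleaning up the outputs.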

import glob
import os
import shutil

from tests import get_device_id, get_tests_output_path, run_cli
from TTS.vocoder.configs import WavernnConfig
from TTS.vocoder.models.wavernn import WavernnArgs

config_path = os.path.join(get_tests_output_path(), "test_vocoder_config.json")
output_path = os.path.join(get_tests_output_path(), "train_outputs")
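
# a minimal training config so the test finishes quickly (one epoch, short sequences)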
config = WavernnConfig(
    model_args=WavernnArgs(),
    batch_size=8,
    eval_batch_size=8,
    num_loader_workers=0,
    num_eval_loader_workers=0,
    run_eval=True,
    test_delay_epochs=-1,
    epochs=1,
    seq_len=256,  # for shorter test time
    eval_split_size=1,
    print_step=1,
    print_eval=True,
    data_path="tests/data/ljspeech",
    output_path=output_path,
)
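
# trim leading/trailing silence from the training audio (60 dB threshold)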
config.audio.do_trim_silence = True
config.audio.trim_db = 60
config.save_json(config_path)
# train the model for one epoch
command_train = f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --config_path {config_path} "
run_cli(command_train)
# find the latest run folder so training can be continued from it
continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
# restore the model and continue training for one more epoch
command_train = (
    f"CUDA_VISIBLE_DEVICES='{get_device_id()}' python TTS/bin/train_vocoder.py --continue_path {continue_path} "
)
run_cli(command_train)
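
# clean up the run folder created by this test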
shutil.rmtree(continue_path)