From 084846f83316b923812dbeb008de1bef6d49b0ba Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Mon, 31 Dec 2018 14:47:53 +0100
Subject: [PATCH] partial commit

---
 README.md   | 4 ++++
 config.json | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5f874100..74697340 100644
--- a/README.md
+++ b/README.md
@@ -106,6 +106,10 @@ You can also enjoy Tensorboard, if you point the Tensorboard argument```--logdir
 ## Testing
 Best way to test your pre-trained network is to use Notebooks under ```notebooks``` folder.
 
+## Logging
+# TODO
+TTS enables intense logging on Tensorboard.
+
 ## What is new with TTS
 If you train TTS with LJSpeech dataset, you start to hear reasonable results after 12.5K iterations with batch size 32. This is the fastest training with character based methods up to our knowledge. Out implementation is also quite robust against long sentences.
 
diff --git a/config.json b/config.json
index dde96b94..178c1b68 100644
--- a/config.json
+++ b/config.json
@@ -49,7 +49,7 @@
     "dataset": "tweb",          // one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for pre-computed dataset by extract_features.py
     "min_seq_len": 0,           // minimum text length to use in training
     "max_seq_len": 300,         // maximum text length
-    "output_path": "../keep/",  // output path for all training outputs.
+    "output_path": "/media/erogol/data_ssd/Data/models/tweb_models/",  // output path for all training outputs.
     "num_loader_workers": 8,    // number of training data loader processes. Don't set it too big. 4-8 are good values.
     "num_val_loader_workers": 4 // number of evaluation data loader processes.
 }
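For reference, the config.json touched by this patch is annotated with // comments, so a strict JSON parser will reject it as-is. Below is a minimal sketch of how such a config might be read before training; the `strip_comments` helper is hypothetical, and the project may parse the file differently.

```python
# Minimal sketch (not the repo's actual loader): strip the // comments from
# config.json, parse it, and read the fields changed/documented in this patch.
import json
import re


def strip_comments(text):
    # Hypothetical helper: drop // comments so the annotated config parses as
    # plain JSON. Naive, but none of the config values here contain "//".
    return re.sub(r"//[^\n]*", "", text)


with open("config.json") as f:
    config = json.loads(strip_comments(f.read()))

print(config["output_path"])         # where all training outputs are written
print(config["num_loader_workers"])  # training data loader processes (4-8 are good values)
```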