mirror of https://github.com/coqui-ai/TTS.git
config_cluster update
parent c6f2382962
commit 1898763028
@@ -30,7 +30,7 @@
     },

     "model": "Tacotron2",  // one of the models in models/
-    "grad_clip": 0.05,     // upper limit on the gradient norm for clipping.
+    "grad_clip": 1,        // upper limit on the gradient norm for clipping.
     "epochs": 1000,        // total number of epochs to train.
     "lr": 0.0001,          // Initial learning rate. If Noam decay is active, maximum learning rate.
     "lr_decay": false,     // if true, Noam learning rate decay is applied during training.
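For context, "grad_clip" caps the global gradient norm before each optimizer step, so raising it from 0.05 to 1 loosens clipping considerably. A minimal sketch of where such a value is typically used, assuming a standard PyTorch training step (illustrative only, not the repository's train.py; model, optimizer, and criterion are placeholders):

    import torch

    def train_step(model, optimizer, criterion, inputs, targets, grad_clip=1.0):
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        # Rescale gradients so their global L2 norm is at most grad_clip;
        # with the old value of 0.05 almost every step would be rescaled.
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=grad_clip)
        optimizer.step()
        return loss.item()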
@@ -41,21 +41,21 @@
     "batch_size": 16,      // Batch size for training. Values lower than 32 might make attention hard to learn.
     "eval_batch_size": 16,
     "r": 1,                // Number of frames to predict per step.
-    "wd": 0.000002,        // Weight decay weight.
+    "wd": 0.000001,        // Weight decay weight.
     "checkpoint": true,    // If true, save checkpoints every "save_step" steps.
     "save_step": 1000,     // Number of training steps between saving training stats and checkpoints.
     "print_step": 10,      // Number of steps between logging training progress on the console.
-    "tb_model_param_stats": false, // If true, plot per-layer param stats on tensorboard. Might be memory consuming, but good for debugging.
-    "batch_group_size": 4, // Number of batches to shuffle after bucketing.
+    "tb_model_param_stats": true,  // If true, plot per-layer param stats on tensorboard. Might be memory consuming, but good for debugging.
+    "batch_group_size": 8, // Number of batches to shuffle after bucketing.

     "run_eval": true,
-    "test_delay_epochs": 100, // Until attention is aligned, testing only wastes computation time.
+    "test_delay_epochs": 10,  // Until attention is aligned, testing only wastes computation time.
     "data_path": "/media/erogol/data_ssd/Data/LJSpeech-1.1", // DATASET-RELATED: can be overwritten from a command argument.
     "meta_file_train": "prompts_train.data", // DATASET-RELATED: metafile for the training dataloader.
     "meta_file_val": "prompts_val.data",     // DATASET-RELATED: metafile for the evaluation dataloader.
     "dataset": "nancy",    // DATASET-RELATED: one of TTS.dataset.preprocessors depending on your target dataset. Use "tts_cache" for a dataset pre-computed by extract_features.py.
     "min_seq_len": 0,      // DATASET-RELATED: minimum text length to use in training.
-    "max_seq_len": 50,     // DATASET-RELATED: maximum text length.
+    "max_seq_len": 120,    // DATASET-RELATED: maximum text length.
     "output_path": "../keep/", // DATASET-RELATED: output path for all training outputs.
     "num_loader_workers": 8,   // number of training data loader processes. Don't set it too big; 4-8 are good values.
     "num_val_loader_workers": 4, // number of evaluation data loader processes.
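The "batch_group_size" bump from 4 to 8 widens the window that is shuffled after length-based bucketing, trading a little per-batch length homogeneity for more ordering randomness between epochs. A generic sketch of that idea, assuming the comment's semantics (a window of batch_size * batch_group_size samples is shuffled); this is illustrative, not the repository's loader code:

    import random

    def bucketed_batches(samples, batch_size, batch_group_size):
        # Sort by length so each batch holds similarly sized sequences,
        # keeping padding (and wasted compute) low.
        ordered = sorted(samples, key=len)
        # Shuffle within windows of batch_group_size batches so training
        # still sees a varying sample order between epochs.
        window = batch_size * batch_group_size
        for start in range(0, len(ordered), window):
            chunk = ordered[start:start + window]
            random.shuffle(chunk)
            ordered[start:start + window] = chunk
        return [ordered[i:i + batch_size] for i in range(0, len(ordered), batch_size)]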