config update

pull/10/head
Eren Golge 2019-05-14 18:08:57 +02:00
parent d8497d99c2
commit b14c11572e
1 changed file with 4 additions and 4 deletions

@@ -1,6 +1,6 @@
 {
-    "run_name": "mozilla-no-loc",
-    "run_description": "using Bahdanau attention, with original prenet.",
+    "run_name": "mozilla-no-loc-fattn-stopnet",
+    "run_description": "using forward attention, with original prenet, merged stopnet. Compare this with ",
     "audio":{
         // Audio processing parameters
@@ -42,12 +42,12 @@
"attention_norm": "softmax", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron.
"prenet_type": "original", // ONLY TACOTRON2 - "original" or "bn".
"prenet_dropout": true, // ONLY TACOTRON2 - enable/disable dropout at prenet.
"use_forward_attn": false, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster.
"use_forward_attn": true, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster.
"transition_agent": false, // ONLY TACOTRON2 - enable/disable transition agent of forward attention.
"location_attn": false, // ONLY TACOTRON2 - enable_disable location sensitive attention. It is enabled for TACOTRON by default.
"loss_masking": false, // enable / disable loss masking against the sequence padding.
"enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars.
"stopnet": false, // Train stopnet predicting the end of synthesis.
"stopnet": true, // Train stopnet predicting the end of synthesis.
"separate_stopnet": false, // Train stopnet seperately if 'stopnet==true'. It prevents stopnet loss to influence the rest of the model. It causes a better model, but it trains SLOWER.
"batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention.