From 537879482dc31592f51fb8cae79919615091d49a Mon Sep 17 00:00:00 2001 From: Thomas Werkmeister Date: Tue, 23 Jul 2019 13:31:10 +0200 Subject: [PATCH] fixed config comment strings for attention parameters --- config.json | 12 ++++++------ config_libritts.json | 10 +++++----- config_tacotron.json | 4 ++-- config_tacotron2.json | 10 +++++----- config_tacotron_de.json | 12 ++++++------ config_tacotron_gst.json | 4 ++-- 6 files changed, 26 insertions(+), 26 deletions(-) diff --git a/config.json b/config.json index 807c4c60..24d26e16 100644 --- a/config.json +++ b/config.json @@ -40,12 +40,12 @@ "windowing": false, // Enables attention windowing. Used only in eval mode. "memory_size": 5, // ONLY TACOTRON - memory queue size used to queue network predictions to feed autoregressive connection. Useful if r < 5. "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron. - "prenet_type": "original", // ONLY TACOTRON2 - "original" or "bn". - "prenet_dropout": true, // ONLY TACOTRON2 - enable/disable dropout at prenet. - "use_forward_attn": true, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster. - "forward_attn_mask": false, - "transition_agent": false, // ONLY TACOTRON2 - enable/disable transition agent of forward attention. - "location_attn": false, // ONLY TACOTRON2 - enable_disable location sensitive attention. It is enabled for TACOTRON by default. + "prenet_type": "original", // "original" or "bn". + "prenet_dropout": true, // enable/disable dropout at prenet. + "use_forward_attn": true, // enable/disable forward attention. In general, it aligns faster. + "forward_attn_mask": false, // Apply forward attention mask at inference to prevent bad modes. Try it if your model does not align well. + "transition_agent": false, // enable/disable transition agent of forward attention. + "location_attn": false, // enable/disable location sensitive attention. 
"loss_masking": true, // enable / disable loss masking against the sequence padding. "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars. "stopnet": true, // Train stopnet predicting the end of synthesis. diff --git a/config_libritts.json b/config_libritts.json index f9a752ec..5579e565 100644 --- a/config_libritts.json +++ b/config_libritts.json @@ -39,13 +39,13 @@ "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr" "memory_size": 5, // ONLY TACOTRON - memory queue size used to queue network predictions to feed autoregressive connection. Useful if r < 5. "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron. - "prenet_type": "original", // ONLY TACOTRON2 - "original" or "bn". - "prenet_dropout": true, // ONLY TACOTRON2 - enable/disable dropout at prenet. + "prenet_type": "original", // "original" or "bn". + "prenet_dropout": true, // enable/disable dropout at prenet. "windowing": false, // Enables attention windowing. Used only in eval mode. - "use_forward_attn": false, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster. + "use_forward_attn": false, // enable/disable forward attention. In general, it aligns faster. "forward_attn_mask": false, - "transition_agent": false, // ONLY TACOTRON2 - enable/disable transition agent of forward attention. - "location_attn": true, // ONLY TACOTRON2 - enable_disable location sensitive attention. It is enabled for TACOTRON by default. + "transition_agent": false, // enable/disable transition agent of forward attention. + "location_attn": true, // enable_disable location sensitive attention. "loss_masking": true, // enable / disable loss masking against the sequence padding. "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars. "stopnet": true, // Train stopnet predicting the end of synthesis. 
diff --git a/config_tacotron.json b/config_tacotron.json index 127a4b3d..92ee3909 100644 --- a/config_tacotron.json +++ b/config_tacotron.json @@ -42,10 +42,10 @@ "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron. "prenet_type": "original", // "original" or "bn". "prenet_dropout": true, // enable/disable dropout at prenet. - "use_forward_attn": true, // if it uses forward attention. In general, it aligns faster. + "use_forward_attn": true, // enable/disable forward attention. In general, it aligns faster. "forward_attn_mask": false, // Apply forward attention mask af inference to prevent bad modes. Try it if your model does not align well. "transition_agent": true, // enable/disable transition agent of forward attention. - "location_attn": false, // enable_disable location sensitive attention. It is enabled for TACOTRON by default. + "location_attn": false, // enable/disable location sensitive attention. "loss_masking": true, // enable / disable loss masking against the sequence padding. "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars. "stopnet": true, // Train stopnet predicting the end of synthesis. diff --git a/config_tacotron2.json b/config_tacotron2.json index fd188d20..02b4341b 100644 --- a/config_tacotron2.json +++ b/config_tacotron2.json @@ -39,12 +39,12 @@ "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr" "memory_size": 5, // ONLY TACOTRON - memory queue size used to queue network predictions to feed autoregressive connection. Useful if r < 5. "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron. - "prenet_type": "original", // ONLY TACOTRON2 - "original" or "bn". - "prenet_dropout": true, // ONLY TACOTRON2 - enable/disable dropout at prenet. - "use_forward_attn": true, // ONLY TACOTRON2 - if it uses forward attention. 
In general, it aligns faster. + "prenet_type": "original", // "original" or "bn". + "prenet_dropout": true, // enable/disable dropout at prenet. + "use_forward_attn": true, // enable/disable forward attention. In general, it aligns faster. "forward_attn_mask": false, // Apply forward attention mask af inference to prevent bad modes. Try it if your model does not align well. - "transition_agent": false, // ONLY TACOTRON2 - enable/disable transition agent of forward attention. - "location_attn": false, // ONLY TACOTRON2 - enable_disable location sensitive attention. It is enabled for TACOTRON by default. + "transition_agent": false, // enable/disable transition agent of forward attention. + "location_attn": false, // enable/disable location sensitive attention. It is enabled for TACOTRON by default. "loss_masking": true, // enable / disable loss masking against the sequence padding. "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars. "stopnet": true, // Train stopnet predicting the end of synthesis. diff --git a/config_tacotron_de.json b/config_tacotron_de.json index 834bfed4..fc3efbec 100644 --- a/config_tacotron_de.json +++ b/config_tacotron_de.json @@ -40,12 +40,12 @@ "windowing": false, // Enables attention windowing. Used only in eval mode. "memory_size": 5, // ONLY TACOTRON - memory queue size used to queue network predictions to feed autoregressive connection. Useful if r < 5. "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron. - "prenet_type": "original", // ONLY TACOTRON2 - "original" or "bn". - "prenet_dropout": true, // ONLY TACOTRON2 - enable/disable dropout at prenet. - "use_forward_attn": false, // ONLY TACOTRON2 - if it uses forward attention. In general, it aligns faster. - "transition_agent": false, // ONLY TACOTRON2 - enable/disable transition agent of forward attention. 
- "forward_attn_mask": false, - "location_attn": true, // ONLY TACOTRON2 - enable_disable location sensitive attention. It is enabled for TACOTRON by default. + "prenet_type": "original", // "original" or "bn". + "prenet_dropout": true, // enable/disable dropout at prenet. + "use_forward_attn": false, // enable/disable forward attention. In general, it aligns faster. + "transition_agent": false, // enable/disable transition agent of forward attention. + "forward_attn_mask": false, // Apply forward attention mask at inference to prevent bad modes. Try it if your model does not align well. + "location_attn": true, // enable_disable location sensitive attention. It is enabled for TACOTRON by default. "loss_masking": true, // enable / disable loss masking against the sequence padding. "enable_eos_bos_chars": false, // enable/disable beginning of sentence and end of sentence chars. "stopnet": true, // Train stopnet predicting the end of synthesis. diff --git a/config_tacotron_gst.json b/config_tacotron_gst.json index 98fafa54..5a0f2c09 100644 --- a/config_tacotron_gst.json +++ b/config_tacotron_gst.json @@ -42,8 +42,8 @@ "attention_norm": "sigmoid", // softmax or sigmoid. Suggested to use softmax for Tacotron2 and sigmoid for Tacotron. "prenet_type": "original", // "original" or "bn". "prenet_dropout": true, // enable/disable dropout at prenet. - "use_forward_attn": true, // if it uses forward attention. In general, it aligns faster. - "forward_attn_mask": false, // Apply forward attention mask af inference to prevent bad modes. Try it if your model does not align well. + "use_forward_attn": true, // enable/disable forward attention. In general, it aligns faster. + "forward_attn_mask": false, // Apply forward attention mask at inference to prevent bad modes. Try it if your model does not align well. "transition_agent": false, // enable/disable transition agent of forward attention. "location_attn": false, // enable_disable location sensitive attention. 
It is enabled for TACOTRON by default. "loss_masking": true, // enable / disable loss masking against the sequence padding.