mirror of https://github.com/coqui-ai/TTS.git
Make attn windowing optional
parent 8969d59902
commit f4fa155cd3
@@ -31,6 +31,7 @@
     "lr": 0.001,  // Initial learning rate. If Noam decay is active, maximum learning rate.
     "lr_decay": false,  // if true, Noam learning rate decaying is applied through training.
     "warmup_steps": 4000,  // Noam decay steps to increase the learning rate from 0 to "lr"
+    "windowing": true,  // Enables attention windowing. Used only in eval mode.
 
     "batch_size": 32,  // Batch size for training. Lower values than 32 might cause hard to learn attention.
     "eval_batch_size":32,
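The hunk below wires this flag into the attention code. For context, here is a minimal sketch of how a flag like this would be loaded and forwarded to the attention cell; the file path, the comment-stripping loader, and the AttentionRNNCell keyword wiring are assumptions for illustration, not the repo's exact API:

    import json
    import re

    def load_commented_json(path):
        # The config above contains // comments, so plain json.load() would
        # fail; strip them first. Naive sketch: this would also strip "//"
        # occurring inside string values.
        with open(path) as f:
            text = f.read()
        text = re.sub(r"//.*", "", text)
        return json.loads(text)

    config = load_commented_json("config.json")  # illustrative path
    use_windowing = config.get("windowing", False)  # flag added by this commit
    # Hypothetical wiring: the flag would be passed down to the attention
    # module, e.g. AttentionRNNCell(..., windowing=use_windowing)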
@@ -160,8 +160,7 @@ class AttentionRNNCell(nn.Module):
             mask = mask.view(memory.size(0), -1)
             alignment.masked_fill_(1 - mask, -float("inf"))
         # Windowing
-        if not self.training:
-            # print(" > Windowing active")
+        if not self.training and self.windowing:
             back_win = self.win_idx - self.win_back
             front_win = self.win_idx + self.win_front
             if back_win > 0:
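For reference, a minimal standalone sketch of the windowing step this guard now protects, assuming alignment holds raw pre-softmax attention energies of shape (batch, max_time) and win_idx tracks the current window centre; the function name and default window sizes are illustrative, not the repo's API:

    import torch

    def apply_attention_window(alignment, win_idx, win_back=3, win_front=6):
        # Restrict attention energies to a moving window around win_idx.
        # A sketch of the idea guarded by `self.windowing` above.
        back_win = win_idx - win_back
        front_win = win_idx + win_front
        if back_win > 0:
            # Block frames far behind the current attention position.
            alignment[:, :back_win] = -float("inf")
        if front_win < alignment.size(1):
            # Block frames far ahead of the current attention position.
            alignment[:, front_win:] = -float("inf")
        # Move the window centre to the strongest remaining energy.
        new_win_idx = torch.argmax(alignment, dim=1)[0].item()
        return alignment, new_win_idx

In eval mode with windowing enabled, energies outside the window are forced to -inf before the softmax, so the decoder can only attend near its current position; this commit makes that behaviour opt-in via self.windowing instead of always-on at eval time.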