Make attn windowing optional

pull/10/head
Eren Golge 2019-01-14 15:08:35 +01:00
parent 8969d59902
commit f4fa155cd3
2 changed files with 2 additions and 2 deletions

@@ -31,6 +31,7 @@
     "lr": 0.001, // Initial learning rate. If Noam decay is active, maximum learning rate.
     "lr_decay": false, // if true, Noam learning rate decaying is applied through training.
     "warmup_steps": 4000, // Noam decay steps to increase the learning rate from 0 to "lr"
+    "windowing": true, // Enables attention windowing. Used only in eval mode.
     "batch_size": 32, // Batch size for training. Lower values than 32 might cause hard to learn attention.
     "eval_batch_size":32,

@@ -160,8 +160,7 @@ class AttentionRNNCell(nn.Module):
             mask = mask.view(memory.size(0), -1)
             alignment.masked_fill_(1 - mask, -float("inf"))
         # Windowing
-        if not self.training:
-            # print(" > Windowing active")
+        if not self.training and self.windowing:
             back_win = self.win_idx - self.win_back
             front_win = self.win_idx + self.win_front
             if back_win > 0:
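
To make the change above easier to follow, here is a minimal, standalone sketch of the windowing idea: at eval time, alignment scores outside the window [win_idx - win_back, win_idx + win_front] around the current attention position are pushed to -inf before the softmax, so attention cannot jump far from its last position. Variable names mirror the diff; the helper function, default window sizes, and usage values are illustrative assumptions, not taken from this commit.

import torch

def apply_attention_window(alignment, win_idx, win_back=3, win_front=6):
    # alignment: (batch, encoder_steps) raw attention scores
    back_win = win_idx - win_back
    front_win = win_idx + win_front
    if back_win > 0:
        alignment[:, :back_win] = -float("inf")
    if front_win < alignment.size(1):
        alignment[:, front_win:] = -float("inf")
    return alignment

# Usage: confine attention around position 10, then normalize.
scores = torch.randn(2, 50)
masked = apply_attention_window(scores, win_idx=10)
weights = torch.softmax(masked, dim=1)
# The next win_idx would typically be derived from weights.argmax().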