From 1d0bc1b6b25cd4b5199d4a3883d8f5df5277f756 Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Wed, 25 Apr 2018 05:45:32 -0700
Subject: [PATCH] make attn guiding optional #2

---
 config.json | 2 +-
 train.py    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/config.json b/config.json
index 27d630a5..3d0be81e 100644
--- a/config.json
+++ b/config.json
@@ -16,7 +16,7 @@
     "batch_size": 32,
     "eval_batch_size":32,
     "r": 5,
-    "mk": 1,
+    "mk": 0,
 
     "griffin_lim_iters": 60,
     "power": 1.2,
diff --git a/train.py b/train.py
index e798c778..4b8a3a2a 100644
--- a/train.py
+++ b/train.py
@@ -126,6 +126,7 @@ def train(model, criterion, data_loader, optimizer, epoch):
         if c.mk > 0.0:
             attention_loss = criterion(alignments, M, mel_lengths_var)
             loss += mk * attention_loss
+            avg_attn_loss += attention_loss.data[0]
 
         # backpass and check the grad norm
         loss.backward()
@@ -148,7 +149,6 @@
                       ('grad_norm', grad_norm)])
         avg_linear_loss += linear_loss.data[0]
         avg_mel_loss += mel_loss.data[0]
-        avg_attn_loss += attention_loss.data[0]
 
         # Plot Training Iter Stats
         tb.add_scalar('TrainIterLoss/TotalLoss', loss.data[0], current_step)
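
Below is a minimal, self-contained sketch of the behavior this patch pins
down: with the avg_attn_loss accumulation moved inside the if c.mk > 0.0
guard, disabling attention guiding (mk = 0, as config.json now does) means
attention_loss is never referenced while undefined. The names c.mk and
avg_attn_loss mirror the diff; the SimpleNamespace config, the
attention_criterion function, and the loss constants are hypothetical
stand-ins, not the repo's code.

    from types import SimpleNamespace

    c = SimpleNamespace(mk=0)  # mirrors the config.json change: guiding off

    def attention_criterion(step):
        # Hypothetical stand-in for criterion(alignments, M, mel_lengths_var).
        return 0.1 / (step + 1)

    avg_attn_loss = 0.0
    for step in range(3):
        loss = 1.0  # stand-in for linear_loss + mel_loss
        if c.mk > 0.0:
            # The weighted loss term and the running average now sit under
            # the same guard, so nothing touches attention_loss when
            # attention guiding is disabled.
            attention_loss = attention_criterion(step)
            loss += c.mk * attention_loss
            avg_attn_loss += attention_loss

    print(avg_attn_loss)  # 0.0 when mk == 0; nonzero once mk > 0

Running the sketch with c = SimpleNamespace(mk=1) instead shows the other
branch: avg_attn_loss accumulates only on the steps where the guard holds,
which is the same quantity the epoch summary in train.py averages.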