Fix computation and printing of average training loss

pull/10/head
Eren Golge 2018-04-12 05:57:52 -07:00
parent 6c2f52b071
commit bc90050ee9
3 changed files with 17 additions and 20 deletions

@@ -285,14 +285,12 @@ class Decoder(nn.Module):
             if greedy:
                 memory_input = outputs[-1]
             else:
-                # TODO: try sampled teacher forcing
-                # combine prev. model output and prev. real target
-                # memory_input = torch.div(outputs[-1] + memory[t-1], 2.0)
-                # add a random noise
-                # noise = torch.autograd.Variable(
-                #     memory_input.data.new(memory_input.size()).normal_(0.0, 0.5))
-                # memory_input = memory_input + noise
-                memory_input = memory[t-1]
+                # combine prev. model output and prev. real target
+                memory_input = torch.div(outputs[-1] + memory[t-1], 2.0)
+                # add a random noise
+                noise = torch.autograd.Variable(
+                    memory_input.data.new(memory_input.size()).normal_(0.0, 2.0))
+                memory_input = memory_input + noise
             # Prenet
             processed_memory = self.prenet(memory_input)
             # Attention RNN
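
This hunk replaces plain teacher forcing (feeding the ground-truth frame memory[t-1]) with the previously commented-out variant: the decoder input becomes the mean of the last predicted frame and the last ground-truth frame, perturbed with Gaussian noise, with the noise std raised from the commented 0.5 to 2.0. A minimal standalone sketch of that input construction, assuming the 0.3-era PyTorch API where torch.autograd.Variable wraps tensors (the helper name is illustrative, not from this repo):

    import torch
    from torch.autograd import Variable

    def mixed_teacher_forcing_input(prev_output, prev_target, noise_std=2.0):
        # Average the decoder's previous output with the previous target...
        memory_input = torch.div(prev_output + prev_target, 2.0)
        # ...then perturb it, so training inputs look noisier and closer
        # to what the decoder sees at inference time.
        noise = Variable(
            memory_input.data.new(memory_input.size()).normal_(0.0, noise_std))
        return memory_input + noise

    # Example: two 80-dim mel frames for a batch of 2.
    prev_output = Variable(torch.randn(2, 80))
    prev_target = Variable(torch.randn(2, 80))
    print(mixed_teacher_forcing_input(prev_output, prev_target).size())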

@@ -136,6 +136,8 @@ def train(model, criterion, data_loader, optimizer, epoch):
                                 linear_loss.data[0]),
                                ('mel_loss', mel_loss.data[0]),
                                ('grad_norm', grad_norm)])
+        avg_linear_loss += linear_loss.data[0]
+        avg_mel_loss += mel_loss.data[0]
         # Plot Training Iter Stats
         tb.add_scalar('TrainIterLoss/TotalLoss', loss.data[0], current_step)
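
The two added lines are the heart of the fix: avg_linear_loss and avg_mel_loss were initialized earlier in train() but never accumulated, so the epoch-level statistics further down reflected only the last batch. Note that .data[0] is the PyTorch 0.3 idiom for extracting a Python float from a one-element Variable (later versions use .item()). A self-contained sketch of the running-average pattern with stand-in numbers (the real loop body is elided):

    # Stand-in per-iteration (linear_loss, mel_loss) values.
    losses = [(0.9, 0.5), (0.7, 0.4), (0.6, 0.3)]

    avg_linear_loss = 0.0
    avg_mel_loss = 0.0
    for num_iter, (linear_loss, mel_loss) in enumerate(losses):
        avg_linear_loss += linear_loss   # accumulate on every iteration
        avg_mel_loss += mel_loss
    # One division per epoch turns the sums into averages (assuming the
    # commit divides by the iteration count in a hunk not shown here).
    avg_linear_loss /= num_iter + 1
    avg_mel_loss /= num_iter + 1
    print(avg_linear_loss, avg_mel_loss)   # ~0.733 and 0.4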
@@ -185,10 +187,9 @@ def train(model, criterion, data_loader, optimizer, epoch):
     avg_total_loss = avg_mel_loss + avg_linear_loss
     # Plot Training Epoch Stats
-    tb.add_scalar('TrainEpochLoss/TotalLoss', loss.data[0], current_step)
-    tb.add_scalar('TrainEpochLoss/LinearLoss',
-                  linear_loss.data[0], current_step)
-    tb.add_scalar('TrainEpochLoss/MelLoss', mel_loss.data[0], current_step)
+    tb.add_scalar('TrainEpochLoss/TotalLoss', avg_total_loss, current_step)
+    tb.add_scalar('TrainEpochLoss/LinearLoss', avg_linear_loss, current_step)
+    tb.add_scalar('TrainEpochLoss/MelLoss', avg_mel_loss, current_step)
     tb.add_scalar('Time/EpochTime', epoch_time, epoch)
     epoch_time = 0
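
With the sums in place, the epoch plots switch from the last iteration's loss.data[0] to the accumulated averages, and the total is recomputed as their sum. Assuming tb is a tensorboardX-style SummaryWriter (add_scalar(tag, value, step) is its real signature), the corrected logging looks like this in isolation, with illustrative values:

    from tensorboardX import SummaryWriter

    tb = SummaryWriter('logs')
    current_step = 1000
    avg_linear_loss, avg_mel_loss = 0.42, 0.17   # epoch averages in train()
    avg_total_loss = avg_mel_loss + avg_linear_loss

    tb.add_scalar('TrainEpochLoss/TotalLoss', avg_total_loss, current_step)
    tb.add_scalar('TrainEpochLoss/LinearLoss', avg_linear_loss, current_step)
    tb.add_scalar('TrainEpochLoss/MelLoss', avg_mel_loss, current_step)
    tb.close()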
@@ -198,14 +199,12 @@ def train(model, criterion, data_loader, optimizer, epoch):
 def evaluate(model, criterion, data_loader, current_step):
     model = model.eval()
     epoch_time = 0
-    print(" | > Validation")
-    n_priority_freq = int(3000 / (c.sample_rate * 0.5) * c.num_freq)
-    progbar = Progbar(len(data_loader.dataset) / c.eval_batch_size)
     avg_linear_loss = 0
     avg_mel_loss = 0
+    print(" | > Validation")
+    progbar = Progbar(len(data_loader.dataset) / c.batch_size)
+    n_priority_freq = int(3000 / (c.sample_rate * 0.5) * c.num_freq)
     for num_iter, data in enumerate(data_loader):
         start_time = time.time()
@@ -230,8 +229,8 @@ def evaluate(model, criterion, data_loader, current_step):
             linear_spec_var = linear_spec_var.cuda()
         # forward pass
-        mel_output, linear_output, alignments = model.forward(
-            text_input_var, mel_spec_var)
+        mel_output, linear_output, alignments =\
+            model.forward(text_input_var, mel_spec_var)
         # loss computation
         mel_loss = criterion(mel_output, mel_spec_var, mel_lengths_var)
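
In evaluate(), the loss accumulators now come first and the progress bar is sized with c.batch_size instead of c.eval_batch_size. One caveat worth flagging: len(data_loader.dataset) / c.batch_size is float division under Python 3, while a Keras-style Progbar target is conventionally an integer step count; a ceiling keeps it exact when the dataset size is not a multiple of the batch size. A side-note sketch, not part of the commit:

    import math

    dataset_size = 13100   # illustrative utterance count
    batch_size = 32
    num_steps = math.ceil(dataset_size / batch_size)   # 410, an int
    print(num_steps)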

@@ -103,7 +103,7 @@ def save_best_model(model, optimizer, model_loss, best_loss, out_path,
         best_loss = model_loss
         bestmodel_path = 'best_model.pth.tar'
         bestmodel_path = os.path.join(out_path, bestmodel_path)
-        print("\n | > Best model saving with loss {0:.2f} : {1:}".format(
+        print(" | > Best model saving with loss {0:.2f} : {1:}".format(
             model_loss, bestmodel_path))
         torch.save(state, bestmodel_path)
         return best_loss
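
The last hunk only drops the leading newline from the checkpoint log line, but it sits inside the usual best-model pattern: compare the new loss against the best seen so far and serialize a state dict when it improves. A hedged reconstruction of that pattern (the improvement guard, the state contents, and the parameters after out_path are assumptions, since the hunk shows only part of the function):

    import os
    import torch

    def save_best_model(model, optimizer, model_loss, best_loss, out_path,
                        current_step):
        # Only checkpoint when the loss improves on the best so far (assumed
        # guard; the visible hunk starts inside the improving branch).
        if model_loss < best_loss:
            best_loss = model_loss
            state = {'model': model.state_dict(),          # assumed contents
                     'optimizer': optimizer.state_dict(),
                     'step': current_step}
            bestmodel_path = os.path.join(out_path, 'best_model.pth.tar')
            print(" | > Best model saving with loss {0:.2f} : {1:}".format(
                model_loss, bestmodel_path))
            torch.save(state, bestmodel_path)
        return best_loss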