diff --git a/run_tests.sh b/run_tests.sh
index d7bbcf89..ccc035e5 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -13,6 +13,7 @@ nosetests tests -x &&\
 ./tests/test_vocoder_wavernn_train.sh && \
 ./tests/test_vocoder_wavegrad_train.sh && \
 ./tests/test_speedy_speech_train.sh && \
+./tests/test_align_tts_train.sh && \
 ./tests/test_compute_statistics.sh && \
 
 # linter check
diff --git a/tests/test_glow_tts.py b/tests/test_glow_tts.py
index 38de84a9..e9fdc761 100644
--- a/tests/test_glow_tts.py
+++ b/tests/test_glow_tts.py
@@ -59,7 +59,7 @@ class GlowTTSTrainTest(unittest.TestCase):
             use_encoder_prenet=True,
             num_flow_blocks_dec=12,
             kernel_size_dec=5,
-            dilation_rate=5,
+            dilation_rate=1,
             num_block_layers=4,
             dropout_p_dec=0.,
             num_speakers=0,
@@ -88,7 +88,7 @@ class GlowTTSTrainTest(unittest.TestCase):
             use_encoder_prenet=True,
             num_flow_blocks_dec=12,
             kernel_size_dec=5,
-            dilation_rate=5,
+            dilation_rate=1,
             num_block_layers=4,
             dropout_p_dec=0.,
             num_speakers=0,
@@ -111,11 +111,11 @@ class GlowTTSTrainTest(unittest.TestCase):
             assert (param - param_ref).sum() == 0, param
             count += 1
 
-        optimizer = optim.Adam(model.parameters(), lr=c.lr)
+        optimizer = optim.Adam(model.parameters(), lr=0.001)
         for _ in range(5):
+            optimizer.zero_grad()
             z, logdet, y_mean, y_log_scale, alignments, o_dur_log, o_total_dur = model.forward(
                 input_dummy, input_lengths, mel_spec, mel_lengths, None)
-            optimizer.zero_grad()
             loss_dict = criterion(z, y_mean, y_log_scale, logdet, mel_lengths,
                                   o_dur_log, o_total_dur, input_lengths)
             loss = loss_dict['loss']