From 2451a813a24169a071ad8add4b3bd2c0389230d5 Mon Sep 17 00:00:00 2001
From: gerazov
Date: Mon, 15 Feb 2021 18:40:17 +0100
Subject: [PATCH] refactored keep_all_best

---
 TTS/bin/train_glow_tts.py                                 | 6 +++---
 TTS/bin/train_speedy_speech.py                            | 6 +++---
 TTS/bin/train_tacotron.py                                 | 6 +++---
 TTS/bin/train_vocoder_gan.py                              | 6 +++---
 TTS/bin/train_vocoder_wavegrad.py                         | 6 +++---
 TTS/bin/train_vocoder_wavernn.py                          | 6 +++---
 TTS/tts/configs/config.json                               | 4 ++--
 TTS/tts/configs/glow_tts_gated_conv.json                  | 4 ++--
 TTS/tts/configs/glow_tts_ljspeech.json                    | 4 ++--
 TTS/tts/configs/ljspeech_tacotron2_dynamic_conv_attn.json | 4 ++--
 TTS/tts/configs/speedy_speech_ljspeech.json               | 4 ++--
 TTS/utils/arguments.py                                    | 4 ++--
 TTS/vocoder/configs/multiband-melgan_and_rwd_config.json  | 4 ++--
 TTS/vocoder/configs/multiband_melgan_config.json          | 4 ++--
 TTS/vocoder/configs/multiband_melgan_config_mozilla.json  | 4 ++--
 TTS/vocoder/configs/parallel_wavegan_config.json          | 4 ++--
 TTS/vocoder/configs/universal_fullband_melgan.json        | 4 ++--
 TTS/vocoder/configs/wavegrad_libritts.json                | 4 ++--
 TTS/vocoder/configs/wavernn_config.json                   | 4 ++--
 TTS/vocoder/utils/io.py                                   | 4 ++--
 tests/inputs/test_glow_tts.json                           | 4 ++--
 tests/inputs/test_speedy_speech.json                      | 4 ++--
 tests/inputs/test_train_config.json                       | 4 ++--
 tests/inputs/test_vocoder_multiband_melgan_config.json    | 4 ++--
 tests/inputs/test_vocoder_wavegrad.json                   | 4 ++--
 tests/inputs/test_vocoder_wavernn_config.json             | 4 ++--
 26 files changed, 58 insertions(+), 58 deletions(-)

diff --git a/TTS/bin/train_glow_tts.py b/TTS/bin/train_glow_tts.py
index b4f7103a..2ec8eb1c 100644
--- a/TTS/bin/train_glow_tts.py
+++ b/TTS/bin/train_glow_tts.py
@@ -551,8 +551,8 @@ def main(args):  # pylint: disable=redefined-outer-name
         best_loss = torch.load(args.best_path,
                                map_location='cpu')['model_loss']
         print(f" > Starting with loaded last best loss {best_loss}.")
-    keep_best = c.get('keep_best', False)
-    keep_after = c.get('keep_after', 10000)  # void if keep_best False
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     # define dataloaders
     train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@@ -574,7 +574,7 @@ def main(args):  # pylint: disable=redefined-outer-name
         target_loss = eval_avg_loss_dict['avg_loss']
         best_loss = save_best_model(target_loss, best_loss, model, optimizer,
                                     global_step, epoch, c.r, OUT_PATH,
-                                    keep_best=keep_best, keep_after=keep_after)
+                                    keep_all_best=keep_all_best, keep_after=keep_after)
 
 
 if __name__ == '__main__':
diff --git a/TTS/bin/train_speedy_speech.py b/TTS/bin/train_speedy_speech.py
index 307b02d3..9c8b490e 100644
--- a/TTS/bin/train_speedy_speech.py
+++ b/TTS/bin/train_speedy_speech.py
@@ -515,8 +515,8 @@ def main(args):  # pylint: disable=redefined-outer-name
         best_loss = torch.load(args.best_path,
                                map_location='cpu')['model_loss']
         print(f" > Starting with loaded last best loss {best_loss}.")
-    keep_best = c.get('keep_best', False)
-    keep_after = c.get('keep_after', 10000)  # void if keep_best False
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     # define dataloaders
     train_loader = setup_loader(ap, 1, is_val=False, verbose=True)
@@ -536,7 +536,7 @@ def main(args):  # pylint: disable=redefined-outer-name
         target_loss = eval_avg_loss_dict['avg_loss']
         best_loss = save_best_model(target_loss, best_loss, model, optimizer,
                                     global_step, epoch, c.r, OUT_PATH,
-                                    keep_best=keep_best, keep_after=keep_after)
+                                    keep_all_best=keep_all_best, keep_after=keep_after)
 
 
 if __name__ == '__main__':
diff --git a/TTS/bin/train_tacotron.py b/TTS/bin/train_tacotron.py
index d59438cb..86f2c9d6 100644
--- a/TTS/bin/train_tacotron.py
+++ b/TTS/bin/train_tacotron.py
@@ -595,8 +595,8 @@ def main(args):  # pylint: disable=redefined-outer-name
         best_loss = torch.load(args.best_path,
                                map_location='cpu')['model_loss']
         print(f" > Starting with loaded last best loss {best_loss}.")
-    keep_best = c.get('keep_best', False)
-    keep_after = c.get('keep_after', 10000)  # void if keep_best False
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     # define data loaders
     train_loader = setup_loader(ap,
@@ -648,7 +648,7 @@ def main(args):  # pylint: disable=redefined-outer-name
             epoch,
             c.r,
             OUT_PATH,
-            keep_best=keep_best,
+            keep_all_best=keep_all_best,
             keep_after=keep_after,
             scaler=scaler.state_dict() if c.mixed_precision else None
         )
diff --git a/TTS/bin/train_vocoder_gan.py b/TTS/bin/train_vocoder_gan.py
index 9043a560..708bf350 100644
--- a/TTS/bin/train_vocoder_gan.py
+++ b/TTS/bin/train_vocoder_gan.py
@@ -555,8 +555,8 @@ def main(args):  # pylint: disable=redefined-outer-name
         best_loss = torch.load(args.best_path,
                                map_location='cpu')['model_loss']
         print(f" > Starting with best loss of {best_loss}.")
-    keep_best = c.get('keep_best', False)
-    keep_after = c.get('keep_after', 10000)  # void if keep_best False
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -581,7 +581,7 @@ def main(args):  # pylint: disable=redefined-outer-name
             global_step,
             epoch,
             OUT_PATH,
-            keep_best=keep_best,
+            keep_all_best=keep_all_best,
             keep_after=keep_after,
             model_losses=eval_avg_loss_dict,
         )
diff --git a/TTS/bin/train_vocoder_wavegrad.py b/TTS/bin/train_vocoder_wavegrad.py
index 271e8d4c..51a31509 100644
--- a/TTS/bin/train_vocoder_wavegrad.py
+++ b/TTS/bin/train_vocoder_wavegrad.py
@@ -403,8 +403,8 @@ def main(args):  # pylint: disable=redefined-outer-name
         best_loss = torch.load(args.best_path,
                                map_location='cpu')['model_loss']
         print(f" > Starting with loaded last best loss {best_loss}.")
-    keep_best = c.get('keep_best', False)
-    keep_after = c.get('keep_after', 10000)  # void if keep_best False
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -426,7 +426,7 @@ def main(args):  # pylint: disable=redefined-outer-name
             global_step,
             epoch,
             OUT_PATH,
-            keep_best=keep_best,
+            keep_all_best=keep_all_best,
             keep_after=keep_after,
             model_losses=eval_avg_loss_dict,
             scaler=scaler.state_dict() if c.mixed_precision else None
diff --git a/TTS/bin/train_vocoder_wavernn.py b/TTS/bin/train_vocoder_wavernn.py
index 5fde5025..8e9c6a8b 100644
--- a/TTS/bin/train_vocoder_wavernn.py
+++ b/TTS/bin/train_vocoder_wavernn.py
@@ -426,8 +426,8 @@ def main(args):  # pylint: disable=redefined-outer-name
         best_loss = torch.load(args.best_path,
                                map_location='cpu')['model_loss']
         print(f" > Starting with loaded last best loss {best_loss}.")
-    keep_best = c.get('keep_best', False)
-    keep_after = c.get('keep_after', 10000)  # void if keep_best False
+    keep_all_best = c.get('keep_all_best', False)
+    keep_after = c.get('keep_after', 10000)  # void if keep_all_best False
 
     global_step = args.restore_step
     for epoch in range(0, c.epochs):
@@ -450,7 +450,7 @@ def main(args):  # pylint: disable=redefined-outer-name
             global_step,
             epoch,
             OUT_PATH,
-            keep_best=keep_best,
+            keep_all_best=keep_all_best,
             keep_after=keep_after,
             model_losses=eval_avg_loss_dict,
             scaler=scaler.state_dict() if c.mixed_precision else None
diff --git a/TTS/tts/configs/config.json b/TTS/tts/configs/config.json
index 5bd249d9..ba33acc5 100644
--- a/TTS/tts/configs/config.json
+++ b/TTS/tts/configs/config.json
@@ -121,8 +121,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000,     // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/tts/configs/glow_tts_gated_conv.json b/TTS/tts/configs/glow_tts_gated_conv.json
index 865c6f29..c4d7b1e5 100644
--- a/TTS/tts/configs/glow_tts_gated_conv.json
+++ b/TTS/tts/configs/glow_tts_gated_conv.json
@@ -93,8 +93,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     "apex_amp_level": null,
diff --git a/TTS/tts/configs/glow_tts_ljspeech.json b/TTS/tts/configs/glow_tts_ljspeech.json
index 6e15de10..5a4c47c2 100644
--- a/TTS/tts/configs/glow_tts_ljspeech.json
+++ b/TTS/tts/configs/glow_tts_ljspeech.json
@@ -105,8 +105,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/tts/configs/ljspeech_tacotron2_dynamic_conv_attn.json b/TTS/tts/configs/ljspeech_tacotron2_dynamic_conv_attn.json
index 3cf66870..11e42259 100644
--- a/TTS/tts/configs/ljspeech_tacotron2_dynamic_conv_attn.json
+++ b/TTS/tts/configs/ljspeech_tacotron2_dynamic_conv_attn.json
@@ -121,8 +121,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000,     // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/tts/configs/speedy_speech_ljspeech.json b/TTS/tts/configs/speedy_speech_ljspeech.json
index 9f1d3f8b..f61f35cd 100644
--- a/TTS/tts/configs/speedy_speech_ljspeech.json
+++ b/TTS/tts/configs/speedy_speech_ljspeech.json
@@ -109,8 +109,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
 
     "mixed_precision": false,
diff --git a/TTS/utils/arguments.py b/TTS/utils/arguments.py
index 5fd4cf3f..e4983bfb 100644
--- a/TTS/utils/arguments.py
+++ b/TTS/utils/arguments.py
@@ -67,7 +67,7 @@ def parse_arguments(argv):
     return parser.parse_args()
 
 
-def get_last_models(path):
+def get_last_checkpoint(path):
     """Get latest checkpoint or/and best model in path.
 
     It is based on globbing for `*.pth.tar` and the RegEx
@@ -144,7 +144,7 @@ def process_args(args, model_type):
     if args.continue_path:
         args.output_path = args.continue_path
         args.config_path = os.path.join(args.continue_path, "config.json")
-        args.restore_path, best_model = get_last_models(args.continue_path)
+        args.restore_path, best_model = get_last_checkpoint(args.continue_path)
         if not args.best_path:
             args.best_path = best_model
 
diff --git a/TTS/vocoder/configs/multiband-melgan_and_rwd_config.json b/TTS/vocoder/configs/multiband-melgan_and_rwd_config.json
index b4d42f4b..2670c0f3 100644
--- a/TTS/vocoder/configs/multiband-melgan_and_rwd_config.json
+++ b/TTS/vocoder/configs/multiband-melgan_and_rwd_config.json
@@ -138,8 +138,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/configs/multiband_melgan_config.json b/TTS/vocoder/configs/multiband_melgan_config.json
index af2af8a3..807f0836 100644
--- a/TTS/vocoder/configs/multiband_melgan_config.json
+++ b/TTS/vocoder/configs/multiband_melgan_config.json
@@ -128,8 +128,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/configs/multiband_melgan_config_mozilla.json b/TTS/vocoder/configs/multiband_melgan_config_mozilla.json
index 0f133fa7..255315c8 100644
--- a/TTS/vocoder/configs/multiband_melgan_config_mozilla.json
+++ b/TTS/vocoder/configs/multiband_melgan_config_mozilla.json
@@ -141,8 +141,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/configs/parallel_wavegan_config.json b/TTS/vocoder/configs/parallel_wavegan_config.json
index 85e659f4..193b1f7b 100644
--- a/TTS/vocoder/configs/parallel_wavegan_config.json
+++ b/TTS/vocoder/configs/parallel_wavegan_config.json
@@ -130,8 +130,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/configs/universal_fullband_melgan.json b/TTS/vocoder/configs/universal_fullband_melgan.json
index efb6f3cd..511ae70e 100644
--- a/TTS/vocoder/configs/universal_fullband_melgan.json
+++ b/TTS/vocoder/configs/universal_fullband_melgan.json
@@ -124,8 +124,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/configs/wavegrad_libritts.json b/TTS/vocoder/configs/wavegrad_libritts.json
index 9107d556..ade20a8f 100644
--- a/TTS/vocoder/configs/wavegrad_libritts.json
+++ b/TTS/vocoder/configs/wavegrad_libritts.json
@@ -103,8 +103,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 5000,      // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": true,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/configs/wavernn_config.json b/TTS/vocoder/configs/wavernn_config.json
index 220904c9..aa2d7b9f 100644
--- a/TTS/vocoder/configs/wavernn_config.json
+++ b/TTS/vocoder/configs/wavernn_config.json
@@ -89,8 +89,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": false,     // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": false, // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/TTS/vocoder/utils/io.py b/TTS/vocoder/utils/io.py
index 232d972d..60def72a 100644
--- a/TTS/vocoder/utils/io.py
+++ b/TTS/vocoder/utils/io.py
@@ -64,7 +64,7 @@ def save_checkpoint(model, optimizer, scheduler, model_disc, optimizer_disc,
 
 def save_best_model(current_loss, best_loss, model, optimizer, scheduler,
                     model_disc, optimizer_disc, scheduler_disc, current_step,
-                    epoch, out_path, keep_best=False, keep_after=10000,
+                    epoch, out_path, keep_all_best=False, keep_after=10000,
                     **kwargs):
     if current_loss < best_loss:
         best_model_name = f'best_model_{current_step}.pth.tar'
@@ -82,7 +82,7 @@ def save_best_model(current_loss, best_loss, model, optimizer, scheduler,
             model_loss=current_loss,
             **kwargs)
         # only delete previous if current is saved successfully
-        if not keep_best or (current_step < keep_after):
+        if not keep_all_best or (current_step < keep_after):
             model_names = glob.glob(
                 os.path.join(out_path, 'best_model*.pth.tar'))
             for model_name in model_names:
diff --git a/tests/inputs/test_glow_tts.json b/tests/inputs/test_glow_tts.json
index 338ed8ec..0ee9395b 100644
--- a/tests/inputs/test_glow_tts.json
+++ b/tests/inputs/test_glow_tts.json
@@ -106,8 +106,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": true,      // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": true,  // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     "apex_amp_level": null,
diff --git a/tests/inputs/test_speedy_speech.json b/tests/inputs/test_speedy_speech.json
index 4f9f36bc..c4e27737 100644
--- a/tests/inputs/test_speedy_speech.json
+++ b/tests/inputs/test_speedy_speech.json
@@ -111,8 +111,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 5000,      // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": true,      // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": true,  // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.:set n
 
     "mixed_precision": false,
diff --git a/tests/inputs/test_train_config.json b/tests/inputs/test_train_config.json
index 8c9e20d3..14449867 100644
--- a/tests/inputs/test_train_config.json
+++ b/tests/inputs/test_train_config.json
@@ -122,8 +122,8 @@
     "print_eval": false,    // If True, it prints intermediate loss values in evalulation.
     "save_step": 10000,     // Number of training steps expected to save traninpg stats and checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": true,      // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": true,  // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/tests/inputs/test_vocoder_multiband_melgan_config.json b/tests/inputs/test_vocoder_multiband_melgan_config.json
index e1d201ab..92deaee4 100644
--- a/tests/inputs/test_vocoder_multiband_melgan_config.json
+++ b/tests/inputs/test_vocoder_multiband_melgan_config.json
@@ -131,8 +131,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": true,      // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": true,  // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/tests/inputs/test_vocoder_wavegrad.json b/tests/inputs/test_vocoder_wavegrad.json
index 5a068751..f6208e8d 100644
--- a/tests/inputs/test_vocoder_wavegrad.json
+++ b/tests/inputs/test_vocoder_wavegrad.json
@@ -101,8 +101,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 10000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": true,      // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": true,  // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": true,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING
diff --git a/tests/inputs/test_vocoder_wavernn_config.json b/tests/inputs/test_vocoder_wavernn_config.json
index 4239e8bd..decafa70 100644
--- a/tests/inputs/test_vocoder_wavernn_config.json
+++ b/tests/inputs/test_vocoder_wavernn_config.json
@@ -97,8 +97,8 @@
     "print_eval": false,    // If True, it prints loss values for each step in eval run.
     "save_step": 25000,     // Number of training steps expected to plot training stats on TB and save model checkpoints.
     "checkpoint": true,     // If true, it saves checkpoints per "save_step"
-    "keep_best": true,      // If true, keeps all best_models after keep_after steps
-    "keep_after": 10000,    // Global step after which to keep best models if keep_best is true
+    "keep_all_best": true,  // If true, keeps all best_models after keep_after steps
+    "keep_after": 10000,    // Global step after which to keep best models if keep_all_best is true
     "tb_model_param_stats": false,     // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
 
     // DATA LOADING