add apex in check_arguments

pull/10/head
erogol 2020-08-03 12:41:06 +02:00
parent eb905aafd3
commit e2151e77a1
2 changed files with 2 additions and 2 deletions


@@ -637,7 +637,7 @@ if __name__ == '__main__':
     check_config(c)
     _ = os.path.dirname(os.path.realpath(__file__))
-    if c.apex_amp_level:
+    if c.apex_amp_level == 'O1':
         print(" > apex AMP level: ", c.apex_amp_level)
     OUT_PATH = args.continue_path
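
The commit message refers to check_arguments; as a minimal sketch, such a validation could look like the following, assuming a config object c with an apex_amp_level field (the function name and body are illustrative, not the repository's actual code):

def check_arguments(c):
    # Hypothetical sketch only: accept either a disabled setting (null/None)
    # or the single supported AMP level "O1", per the config comment below.
    assert c.apex_amp_level in (None, "O1"), \
        "apex_amp_level must be null (disabled) or 'O1'"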


@@ -67,7 +67,7 @@
     "gradual_training": [[0, 7, 64], [1, 5, 64], [50000, 3, 32], [130000, 2, 32], [290000, 1, 32]], // set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceed.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
     "ga_alpha": 10.0, // weight for guided attention loss. If > 0, guided attention is enabled.
-    "apex_amp_level": "", // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP). NOTE: currently only O1 is supported; use "" (empty string) to deactivate.
+    "apex_amp_level": null, // level of optimization with NVIDIA's apex feature for automatic mixed FP16/FP32 precision (AMP). NOTE: currently only O1 is supported; use "O1" to activate.
     // VALIDATION
     "run_eval": true,