From 39b584581001019063db9abbfc25b8b88eaa22f1 Mon Sep 17 00:00:00 2001
From: rishikksh20
Date: Tue, 23 Feb 2021 22:42:12 +0530
Subject: [PATCH] 1) Add hifigan json files 2) Rename MPD disc 3) Re-format remove weight norm generator

---
 TTS/vocoder/configs/modified_hifigan.json     | 140 ++++++++++++++++++
 TTS/vocoder/layers/hifigan.py                 |  14 +-
 TTS/vocoder/models/hifigan_generator.py       |  14 +-
 ...nator.py => multi_period_discriminator.py} |  30 +++-
 4 files changed, 174 insertions(+), 24 deletions(-)
 create mode 100644 TTS/vocoder/configs/modified_hifigan.json
 rename TTS/vocoder/models/{hifigan_mpd_discriminator.py => multi_period_discriminator.py} (66%)

diff --git a/TTS/vocoder/configs/modified_hifigan.json b/TTS/vocoder/configs/modified_hifigan.json
new file mode 100644
index 00000000..2330efdd
--- /dev/null
+++ b/TTS/vocoder/configs/modified_hifigan.json
@@ -0,0 +1,140 @@
+{
+    "run_name": "hifigan",
+    "run_description": "hifigan mean-var scaling",
+
+    // AUDIO PARAMETERS
+    "audio":{
+        "fft_size": 1024,        // number of stft frequency levels. Size of the linear spectrogram frame.
+        "win_length": 1024,      // stft window length in samples.
+        "hop_length": 256,       // stft window hop-length in samples.
+        "frame_length_ms": null, // stft window length in ms. If null, 'win_length' is used.
+        "frame_shift_ms": null,  // stft window hop-length in ms. If null, 'hop_length' is used.
+
+        // Audio processing parameters
+        "sample_rate": 22050,  // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
+        "preemphasis": 0.0,    // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no pre-emphasis is applied.
+        "ref_level_db": 0,     // reference level db, theoretically 20db is the sound of air.
+
+        // Silence trimming
+        "do_trim_silence": true, // enable trimming of silence from audio as you load it. LJSpeech (false), TWEB (false), Nancy (true)
+        "trim_db": 60,           // threshold for trimming silence. Set this according to your dataset.
+
+        // MelSpectrogram parameters
+        "num_mels": 80,      // size of the mel spec frame.
+        "mel_fmin": 0.0,     // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
+        "mel_fmax": 8000.0,  // maximum freq level for mel-spec. Tune for dataset!!
+        "spec_gain": 1.0,    // scaler value applied after log transform of spectrogram.
+
+        // Normalization parameters
+        "signal_norm": true,     // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
+        "min_level_db": -100,    // lower bound for normalization
+        "symmetric_norm": true,  // move normalization to range [-1, 1]
+        "max_norm": 4.0,         // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
+        "clip_norm": true,       // clip normalized values into the range.
+        "stats_path": "/home/erogol/Data/libritts/LibriTTS/scale_stats.npy" // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based normalization is used and other normalization params are ignored.
+    },
+
+    // DISTRIBUTED TRAINING
+    "distributed":{
+        "backend": "nccl",
+        "url": "tcp:\/\/localhost:54324"
+    },
+
+    // MODEL PARAMETERS
+    "use_pqmf": false,
+
+    // LOSS PARAMETERS
+    "use_stft_loss": false,
+    "use_subband_stft_loss": false,
+    "use_mse_gan_loss": true,
+    "use_hinge_gan_loss": false,
+    "use_feat_match_loss": false,  // use only with melgan discriminators
+
+    // loss weights
+    "stft_loss_weight": 0.5,
+    "subband_stft_loss_weight": 0.5,
+    "mse_G_loss_weight": 2.5,
+    "hinge_G_loss_weight": 2.5,
+    "feat_match_loss_weight": 25,
+
+    // multiscale stft loss parameters
+    "stft_loss_params": {
+        "n_ffts": [1024, 2048, 512],
+        "hop_lengths": [120, 240, 50],
+        "win_lengths": [600, 1200, 240]
+    },
+
+    "target_loss": "avg_G_loss",  // loss value to pick the best model to save after each epoch
+
+    // DISCRIMINATOR
+    "discriminator_model": "hifigan_mpd_discriminator",
+    "discriminator_model_params":{
+        "peroids": [2, 3, 5, 7, 11],
+        "base_channels": 16,
+        "max_channels": 512,
+        "downsample_factors": [4, 4, 4]
+    },
+    "steps_to_start_discriminator": 1,  // steps required to start GAN training.
+
+    // GENERATOR
+    "generator_model": "hifigan_generator",
+    "generator_model_params": {
+        "upsample_factors": [8, 8, 2, 2],
+        "upsample_kernel_sizes": [16, 16, 4, 4],
+        "upsample_initial_channel": 512,
+        "resblock_kernel_sizes": [3, 7, 11],
+        "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
+    },
+
+    // DATASET
+    "data_path": "/home/erogol/Data/libritts/LibriTTS/train-clean-360/",
+    "feature_path": null,
+    "seq_len": 16384,
+    "pad_short": 2000,
+    "conv_pad": 0,
+    "use_noise_augment": false,
+    "use_cache": true,
+
+    "reinit_layers": [],  // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
+
+    // TRAINING
+    "batch_size": 48,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+
+    // VALIDATION
+    "run_eval": true,
+    "test_delay_epochs": 10,      // Until attention is aligned, testing only wastes computation time.
+    "test_sentences_file": null,  // set a file to load sentences to be used for testing. If it is null, default English sentences are used.
+
+    // OPTIMIZER
+    "epochs": 10000,      // total number of epochs to train.
+    "wd": 0.0,            // Weight decay weight.
+    "gen_clip_grad": -1,  // Generator gradient clipping threshold. Apply gradient clipping if > 0.
+    "disc_clip_grad": -1, // Discriminator gradient clipping threshold.
+    "lr_scheduler_gen": "ExponentialLR",  // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
+    "lr_scheduler_gen_params": {
+        "gamma": 0.999
+    },
+    "lr_scheduler_disc": "ExponentialLR",  // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
+    "lr_scheduler_disc_params": {
+        "gamma": 0.999
+    },
+    "lr_gen": 0.0002,  // Initial learning rate. If Noam decay is active, maximum learning rate.
+    "lr_disc": 0.0002,
+
+    // TENSORBOARD and LOGGING
+    "print_step": 25,    // Number of steps to log training on console.
+    "print_eval": false, // If True, it prints loss values for each step in eval run.
+    "save_step": 25000,  // Number of training steps expected to plot training stats on TB and save model checkpoints.
+    "checkpoint": true,  // If true, it saves checkpoints per "save_step".
+    "tb_model_param_stats": false,  // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
+
+    // DATA LOADING
+    "num_loader_workers": 4,      // number of training data loader processes. Don't set it too big. 4-8 are good values.
+    "num_val_loader_workers": 4,  // number of evaluation data loader processes.
+    "eval_split_size": 10,
+
+    // PATHS
+    "output_path": "/home/erogol/Models/"
+}
+
+
diff --git a/TTS/vocoder/layers/hifigan.py b/TTS/vocoder/layers/hifigan.py
index a60f96be..28fbb7b0 100644
--- a/TTS/vocoder/layers/hifigan.py
+++ b/TTS/vocoder/layers/hifigan.py
@@ -24,15 +24,13 @@ class ResStack(nn.Module):
         return x1 + x2

     def remove_weight_norm(self):
-        # nn.utils.remove_weight_norm(self.resstack[2])
-        # nn.utils.remove_weight_norm(self.resstack[4])
-        for idx, layer in enumerate(self.resstack):
-            if len(layer.state_dict()) != 0:
-                try:
-                    nn.utils.remove_weight_norm(layer)
-                except:
-                    layer.remove_weight_norm()
         nn.utils.remove_weight_norm(self.shortcut)
+        nn.utils.remove_weight_norm(self.resstack[2])
+        nn.utils.remove_weight_norm(self.resstack[5])
+        nn.utils.remove_weight_norm(self.resstack[8])
+        nn.utils.remove_weight_norm(self.resstack[11])
+        nn.utils.remove_weight_norm(self.resstack[14])
+        nn.utils.remove_weight_norm(self.resstack[17])

 class MRF(nn.Module):
     def __init__(self, kernels, channel, dilations = [[1,1], [3,1], [5,1]]):
diff --git a/TTS/vocoder/models/hifigan_generator.py b/TTS/vocoder/models/hifigan_generator.py
index 24aed482..370fe17b 100644
--- a/TTS/vocoder/models/hifigan_generator.py
+++ b/TTS/vocoder/models/hifigan_generator.py
@@ -39,19 +39,9 @@ class Generator(nn.Module):
         return out

     def remove_weight_norm(self):
-        for idx, layer in enumerate(self.input):
-            if len(layer.state_dict()) != 0:
-                try:
-                    nn.utils.remove_weight_norm(layer)
-                except:
-                    layer.remove_weight_norm()
+        nn.utils.remove_weight_norm(self.input[1])
+        nn.utils.remove_weight_norm(self.output[2])

-        for idx, layer in enumerate(self.output):
-            if len(layer.state_dict()) != 0:
-                try:
-                    nn.utils.remove_weight_norm(layer)
-                except:
-                    layer.remove_weight_norm()
         for idx, layer in enumerate(self.generator):
             if len(layer.state_dict()) != 0:
                 try:
diff --git a/TTS/vocoder/models/hifigan_mpd_discriminator.py b/TTS/vocoder/models/multi_period_discriminator.py
similarity index 66%
rename from TTS/vocoder/models/hifigan_mpd_discriminator.py
rename to TTS/vocoder/models/multi_period_discriminator.py
index 84891b4e..b7e55974 100644
--- a/TTS/vocoder/models/hifigan_mpd_discriminator.py
+++ b/TTS/vocoder/models/multi_period_discriminator.py
@@ -46,9 +46,20 @@ class PeriodDiscriminator(nn.Module):
         return features[-1], features[:-1]

-class HiFiDiscriminator(nn.Module):
-    def __init__(self, periods=[2, 3, 5, 7, 11]):
-        super(HiFiDiscriminator, self).__init__()
+class MultiPeriodDiscriminator(nn.Module):
+    def __init__(self,
+                 periods=[2, 3, 5, 7, 11],
+                 in_channels=1,
+                 out_channels=1,
+                 num_scales=3,
+                 kernel_sizes=(5, 3),
+                 base_channels=16,
+                 max_channels=1024,
+                 downsample_factors=(4, 4, 4),
+                 pooling_kernel_size=4,
+                 pooling_stride=2,
+                 pooling_padding=1):
+        super(MultiPeriodDiscriminator, self).__init__()
         self.discriminators = nn.ModuleList([
             PeriodDiscriminator(periods[0]),
             PeriodDiscriminator(periods[1]),
             PeriodDiscriminator(periods[2]),
@@ -56,7 +67,18 @@ class HiFiDiscriminator(nn.Module):
             PeriodDiscriminator(periods[4]),
         ])

-        self.msd = MelganMultiscaleDiscriminator()
+        self.msd = MelganMultiscaleDiscriminator(
+            in_channels=1,
+            out_channels=1,
+            num_scales=3,
+            kernel_sizes=(5, 3),
+            base_channels=16,
+            max_channels=1024,
+            downsample_factors=(4, 4, 4),
+            pooling_kernel_size=4,
+            pooling_stride=2,
+            pooling_padding=1
+        )

     def forward(self, x):
         scores, feats = self.msd(x)
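
A quick sanity-check sketch for the renamed class (illustrative only): it assumes the module path introduced by the rename above and that MultiPeriodDiscriminator.forward() returns (scores, feats) lists, as the MelGAN discriminators do.

    import torch

    from TTS.vocoder.models.multi_period_discriminator import MultiPeriodDiscriminator

    # Dummy waveform batch of shape (batch, channels, samples); 16384 matches "seq_len" in the config above.
    disc = MultiPeriodDiscriminator(periods=[2, 3, 5, 7, 11])
    scores, feats = disc(torch.randn(1, 1, 16384))

    # Expect one score / feature-map entry per sub-discriminator: 3 multi-scale + 5 period discriminators.
    print(len(scores), len(feats))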