mirror of https://github.com/coqui-ai/TTS.git
renaming for melgan generator
parent cc2104935e
commit 58784ad09c
@@ -21,7 +21,7 @@ class ResidualStack(nn.Module):
                     nn.Conv1d(channels,
                               channels,
                               kernel_size=kernel_size,
-                              dilation=layer_padding,
+                              dilation=layer_dilation,
                               bias=True)),
                 nn.LeakyReLU(0.2),
                 weight_norm(
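For context, a self-contained sketch of one residual block assembled from the layers visible in this hunk. Only the Conv1d arguments (channels, kernel_size, dilation=layer_dilation, bias=True) come from the diff; the concrete values, the reflection padding, and the residual add are illustrative assumptions.

import torch
from torch import nn
from torch.nn.utils import weight_norm

# Illustrative values; not taken from this commit.
channels, kernel_size, layer_dilation = 64, 3, 2
layer_padding = (kernel_size - 1) // 2 * layer_dilation  # keeps the time axis length unchanged

# Assumed block layout around the weight-normed dilated conv shown in the hunk.
block = nn.Sequential(
    nn.LeakyReLU(0.2),
    nn.ReflectionPad1d(layer_padding),
    weight_norm(
        nn.Conv1d(channels,
                  channels,
                  kernel_size=kernel_size,
                  dilation=layer_dilation,  # the argument renamed in this hunk
                  bias=True)),
    nn.LeakyReLU(0.2),
    weight_norm(nn.Conv1d(channels, channels, kernel_size=1, bias=True)),
)

x = torch.randn(1, channels, 100)  # (batch, channels, time)
y = x + block(x)                   # residual connection preserves the time resolution
print(y.shape)                     # torch.Size([1, 64, 100])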
@@ -1,5 +1,3 @@
-"""Pseudo QMF modules."""
-
 import numpy as np
 import torch
 import torch.nn.functional as F
@@ -77,16 +77,16 @@ class MelganGenerator(nn.Module):
         ]
         self.layers = nn.Sequential(*layers)
 
-    def forward(self, cond_features):
-        return self.layers(cond_features)
+    def forward(self, c):
+        return self.layers(c)
 
-    def inference(self, cond_features):
-        cond_features = cond_features.to(self.layers[1].weight.device)
-        cond_features = torch.nn.functional.pad(
-            cond_features,
+    def inference(self, c):
+        c = c.to(self.layers[1].weight.device)
+        c = torch.nn.functional.pad(
+            c,
             (self.inference_padding, self.inference_padding),
             'replicate')
-        return self.layers(cond_features)
+        return self.layers(c)
 
     def remove_weight_norm(self):
         for _, layer in enumerate(self.layers):
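For reference, a self-contained sketch of the renamed call pattern. The hypothetical TinyGenerator below only stands in for MelganGenerator, whose constructor and full layer stack are not shown in this diff; the forward/inference signatures and the replicate padding mirror the hunk above.

import torch
from torch import nn

class TinyGenerator(nn.Module):
    """Hypothetical stand-in: a single conv replaces the real upsampling stack."""

    def __init__(self, in_channels=80, out_channels=1, inference_padding=2):
        super().__init__()
        self.inference_padding = inference_padding
        self.layers = nn.Sequential(nn.Conv1d(in_channels, out_channels, kernel_size=1))

    def forward(self, c):  # `c` is the conditioning (mel) tensor, as renamed in the diff
        return self.layers(c)

    def inference(self, c):
        # Index 0 here; the real generator reads the device from self.layers[1].
        c = c.to(self.layers[0].weight.device)
        c = torch.nn.functional.pad(
            c, (self.inference_padding, self.inference_padding), 'replicate')
        return self.layers(c)

c = torch.randn(1, 80, 50)          # (batch, mel_channels, frames)
wav = TinyGenerator().inference(c)
print(wav.shape)                    # torch.Size([1, 1, 54]) after padding by 2 on each side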