diff --git a/layers/common_layers.py b/layers/common_layers.py
index d2afe012..24433269 100644
--- a/layers/common_layers.py
+++ b/layers/common_layers.py
@@ -33,7 +33,7 @@ class LinearBN(nn.Module):
         super(LinearBN, self).__init__()
         self.linear_layer = torch.nn.Linear(
             in_features, out_features, bias=bias)
-        self.batch_normalization = nn.BatchNorm1d(out_features)
+        self.batch_normalization = nn.BatchNorm1d(out_features, momentum=0.1, eps=1e-5)
         self._init_w(init_gain)
 
     def _init_w(self, init_gain):
diff --git a/layers/tacotron2.py b/layers/tacotron2.py
index b9aec6fe..bdb169be 100644
--- a/layers/tacotron2.py
+++ b/layers/tacotron2.py
@@ -14,7 +14,7 @@ class ConvBNBlock(nn.Module):
                                        out_channels,
                                        kernel_size,
                                        padding=padding)
-        self.batch_normalization = nn.BatchNorm1d(out_channels)
+        self.batch_normalization = nn.BatchNorm1d(out_channels, momentum=0.1, eps=1e-5)
         self.dropout = nn.Dropout(p=0.5)
         if activation == 'relu':
             self.activation = nn.ReLU()
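
Side note on the change above: momentum=0.1 and eps=1e-5 are PyTorch's default values for nn.BatchNorm1d, so the patch pins the existing normalization hyperparameters explicitly rather than altering behavior. Below is a minimal, hypothetical sketch (ConvBNSketch is not a class from this repository) of the same Conv1d + BatchNorm1d pattern with the parameters spelled out:

import torch
from torch import nn

# Hypothetical illustration, not the repo's ConvBNBlock: Conv1d followed by
# BatchNorm1d with the normalization hyperparameters written out explicitly
# (momentum=0.1, eps=1e-5 match PyTorch's defaults).
class ConvBNSketch(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size):
        super().__init__()
        padding = (kernel_size - 1) // 2  # keep the time dimension unchanged for odd kernels
        self.convolution = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)
        self.batch_normalization = nn.BatchNorm1d(out_channels, momentum=0.1, eps=1e-5)
        self.activation = nn.ReLU()

    def forward(self, x):
        # x: (batch, channels, time)
        return self.activation(self.batch_normalization(self.convolution(x)))

# Usage: 4 sequences, 80 input channels, 100 frames -> output shape (4, 512, 100)
# block = ConvBNSketch(80, 512, kernel_size=5)
# y = block(torch.randn(4, 80, 100))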