From 4ab8cbb0163b41c5af5d58d2d0c1c1a62034a45e Mon Sep 17 00:00:00 2001
From: Eren Golge
Date: Thu, 10 May 2018 16:30:43 -0700
Subject: [PATCH] remove Variable from tacotron.py

---
 layers/tacotron.py | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/layers/tacotron.py b/layers/tacotron.py
index 0229e9a5..96c27328 100644
--- a/layers/tacotron.py
+++ b/layers/tacotron.py
@@ -1,6 +1,5 @@
 # coding: utf-8
 import torch
-from torch.autograd import Variable
 from torch import nn
 
 from .attention import AttentionRNN
@@ -263,16 +262,12 @@ class Decoder(nn.Module):
                                                               self.memory_dim, self.r)
         T_decoder = memory.size(1)
         # go frame - 0 frames tarting the sequence
-        initial_memory = Variable(
-            inputs.data.new(B, self.memory_dim * self.r).zero_())
+        initial_memory = inputs.data.new(B, self.memory_dim * self.r).zero_()
         # Init decoder states
-        attention_rnn_hidden = Variable(
-            inputs.data.new(B, 256).zero_())
-        decoder_rnn_hiddens = [Variable(
-            inputs.data.new(B, 256).zero_())
+        attention_rnn_hidden = inputs.data.new(B, 256).zero_()
+        decoder_rnn_hiddens = [inputs.data.new(B, 256).zero_()
                                for _ in range(len(self.decoder_rnns))]
-        current_context_vec = Variable(
-            inputs.data.new(B, 256).zero_())
+        current_context_vec = inputs.data.new(B, 256).zero_()
         # Time first (T_decoder, B, memory_dim)
         if memory is not None:
             memory = memory.transpose(0, 1)
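
Note (editorial, not part of the patch): the change drops the torch.autograd.Variable
wrapper, which became a no-op when PyTorch 0.4 merged Variable into Tensor. Below is a
minimal standalone sketch of the before/after pattern; the concrete sizes for B,
memory_dim, and r are illustrative assumptions, not values taken from the diff.

    import torch

    # Illustrative sizes only; the diff hard-codes 256 for the RNN states.
    B, memory_dim, r = 32, 80, 5

    # Stand-in for the encoder outputs the decoder receives.
    inputs = torch.randn(B, 256)

    # Pre-0.4 style removed by the patch:
    #   initial_memory = Variable(inputs.data.new(B, memory_dim * r).zero_())
    # Post-0.4 style kept by the patch -- the Variable wrapper is simply dropped:
    initial_memory = inputs.data.new(B, memory_dim * r).zero_()

    # Equivalent modern idiom (same dtype/device as `inputs`):
    initial_memory = inputs.new_zeros(B, memory_dim * r)

    assert initial_memory.shape == (B, memory_dim * r)

Keeping the inputs.data.new(...).zero_() calls rather than rewriting them means the
patch stays a mechanical unwrapping, which makes it easy to review; new_zeros is the
tidier equivalent on PyTorch >= 0.4.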