mirror of https://github.com/coqui-ai/TTS.git

remove Variable from tacotron.py

parent a856f76791
commit 4ab8cbb016
@@ -1,6 +1,5 @@
 # coding: utf-8
 import torch
-from torch.autograd import Variable
 from torch import nn
 
 from .attention import AttentionRNN
@@ -263,16 +262,12 @@ class Decoder(nn.Module):
                 self.memory_dim, self.r)
         T_decoder = memory.size(1)
         # go frame - 0 frames tarting the sequence
-        initial_memory = Variable(
-            inputs.data.new(B, self.memory_dim * self.r).zero_())
+        initial_memory = inputs.data.new(B, self.memory_dim * self.r).zero_()
         # Init decoder states
-        attention_rnn_hidden = Variable(
-            inputs.data.new(B, 256).zero_())
-        decoder_rnn_hiddens = [Variable(
-            inputs.data.new(B, 256).zero_())
+        attention_rnn_hidden = inputs.data.new(B, 256).zero_()
+        decoder_rnn_hiddens = [inputs.data.new(B, 256).zero_()
             for _ in range(len(self.decoder_rnns))]
-        current_context_vec = Variable(
-            inputs.data.new(B, 256).zero_())
+        current_context_vec = inputs.data.new(B, 256).zero_()
         # Time first (T_decoder, B, memory_dim)
         if memory is not None:
             memory = memory.transpose(0, 1)
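
Context for the change: since PyTorch 0.4 the Tensor and Variable classes are merged, so wrapping a freshly created tensor in torch.autograd.Variable is a no-op and the wrapper can be dropped, which is all this commit does. A minimal sketch of the before/after follows; the sizes for B, memory_dim, and r are made up for illustration, and new_zeros is an equivalent modern shorthand rather than what the commit itself uses:

import torch

B, memory_dim, r = 2, 80, 5     # illustrative sizes only
inputs = torch.zeros(B, 128)    # stand-in for the encoder output

# Pre-0.4 style, removed by this commit:
#   initial_memory = Variable(inputs.data.new(B, memory_dim * r).zero_())
# Post-0.4 style, as in the commit:
initial_memory = inputs.data.new(B, memory_dim * r).zero_()
# Equivalent one-liner that also inherits dtype/device from inputs:
initial_memory = inputs.new_zeros(B, memory_dim * r)

assert initial_memory.shape == (B, memory_dim * r)

Both spellings produce a zero-filled tensor on the same device and dtype as inputs; the only behavioral difference from the old code is the absence of the deprecated Variable wrapper.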