# coding: utf-8
import torch
from torch import nn

from utils.text.symbols import symbols
from layers.tacotron import Prenet, Encoder, Decoder, CBHG


class Tacotron(nn.Module):
    def __init__(self, embedding_dim=256, linear_dim=1025, mel_dim=80,
                 r=5, padding_idx=None):
        super(Tacotron, self).__init__()
        self.r = r
        self.mel_dim = mel_dim
        self.linear_dim = linear_dim
        self.embedding = nn.Embedding(len(symbols), embedding_dim,
                                      padding_idx=padding_idx)
        print(" | > Number of characters : {}".format(len(symbols)))
        self.embedding.weight.data.normal_(0, 0.3)
        self.encoder = Encoder(embedding_dim)
        self.decoder = Decoder(256, mel_dim, r)
        self.postnet = CBHG(mel_dim, K=8, projections=[256, mel_dim])
        self.last_linear = nn.Linear(mel_dim * 2, linear_dim)

    def forward(self, characters, mel_specs=None, text_lens=None):
        B = characters.size(0)
        inputs = self.embedding(characters)
        # batch x time x dim
        encoder_outputs = self.encoder(inputs)
        # batch x time x dim*r
        mel_outputs, alignments, stop_tokens = self.decoder(
            encoder_outputs, mel_specs, text_lens)
        # Reshape
        # batch x time x dim
        mel_outputs = mel_outputs.view(B, -1, self.mel_dim)
        linear_outputs = self.postnet(mel_outputs)
        linear_outputs = self.last_linear(linear_outputs)
        return mel_outputs, linear_outputs, alignments, stop_tokens
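

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): builds the model and
# runs a teacher-forced forward pass on dummy data. It assumes the local
# `utils.text.symbols` and `layers.tacotron` modules imported above are on the
# path, and that the decoder accepts mel targets shaped (batch, time, mel_dim)
# with the time axis a multiple of `r`, as the reshape in `forward` implies.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model = Tacotron(embedding_dim=256, linear_dim=1025, mel_dim=80, r=5)
    characters = torch.randint(0, len(symbols), (2, 50))  # dummy character ids
    mel_specs = torch.randn(2, 100, 80)                   # dummy mel targets, 100 = 20 * r frames
    mel_out, linear_out, alignments, stop_tokens = model(characters, mel_specs)
    # Under these assumptions the shapes should be (2, 100, 80) and (2, 100, 1025).
    print(mel_out.shape, linear_out.shape)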