TTS/utils/synthesis.py

48 lines
1.8 KiB
Python
Raw Normal View History

2018-11-02 15:13:51 +00:00
import io
import time
import librosa
import torch
import numpy as np
2019-02-25 16:20:05 +00:00
from .text import text_to_sequence, phoneme_to_sequence, sequence_to_phoneme
2018-11-02 15:13:51 +00:00
from .visual import visualize
from matplotlib import pylab as plt
2019-03-11 16:40:09 +00:00
def synthesis(model, text, CONFIG, use_cuda, ap, truncated=False):
    """Synthesize voice for the given text.

    Args:
        model (TTS.models): model to synthesize.
        text (str): target text.
        CONFIG (dict): config dictionary to be loaded from config.json.
        use_cuda (bool): enable cuda.
        ap (TTS.utils.audio.AudioProcessor): audio processor to process
            model outputs.
        truncated (bool): keep model states after inference. It can be used
            for continuous inference at long texts.

    Returns:
        tuple: (wav, alignment, decoder_output, postnet_output, stop_tokens)
            where wav is a 1-D numpy waveform trimmed at the detected
            endpoint and the model outputs are numpy arrays of the first
            (and only) batch item.
    """
    # Convert the input text to an integer id sequence, going through the
    # phonemizer when the config asks for phoneme-based input.
    text_cleaner = [CONFIG.text_cleaner]
    if CONFIG.use_phonemes:
        seq = np.asarray(
            phoneme_to_sequence(text, text_cleaner, CONFIG.phoneme_language),
            dtype=np.int32)
    else:
        seq = np.asarray(text_to_sequence(text, text_cleaner), dtype=np.int32)
    # Add a batch dimension and cast once; embeddings require a LongTensor.
    chars_var = torch.from_numpy(seq).unsqueeze(0).long()
    if use_cuda:
        chars_var = chars_var.cuda()
    # Truncated inference keeps decoder states across calls so long texts
    # can be synthesized chunk by chunk; both paths share the same signature.
    run_inference = model.inference_truncated if truncated else model.inference
    decoder_output, postnet_output, alignments, stop_tokens = run_inference(
        chars_var)
    # Strip the batch dimension and move outputs to host memory.
    postnet_output = postnet_output[0].data.cpu().numpy()
    decoder_output = decoder_output[0].data.cpu().numpy()
    alignment = alignments[0].cpu().data.numpy()
    # "Tacotron" predicts linear spectrograms; other models (e.g. Tacotron2)
    # predict mel spectrograms, so pick the matching inverse transform.
    if CONFIG.model == "Tacotron":
        wav = ap.inv_spectrogram(postnet_output.T)
    else:
        wav = ap.inv_mel_spectrogram(postnet_output.T)
    # Trim trailing silence the model may have generated past end of speech.
    wav = wav[:ap.find_endpoint(wav)]
    return wav, alignment, decoder_output, postnet_output, stop_tokens