mirror of https://github.com/coqui-ai/TTS.git
Update toy server for the recent updates
parent 0cca7920fc
commit b8ca19fd2c
server/README.md
@@ -2,8 +2,8 @@
 Steps to run:
 1. Download one of the models given on the main page. Click [here](https://drive.google.com/drive/folders/1Q6BKeEkZyxSGsocK2p_mqgzLwlNvbHFJ?usp=sharing) for the latest model.
-2. Set the paths and the other options in the file ```server/conf.json```.
-3. Run the server ```python server/server.py -c server/conf.json```. (Requires Flask)
-4. Go to ```localhost:[given_port]``` and enjoy.
+2. Check out the corresponding commit, or use the ```server``` branch if you would like to use the latest model.
+3. Set the paths and the other options in the file ```server/conf.json```.
+4. Run the server with ```python server/server.py -c server/conf.json```. (Requires Flask.)
+5. Go to ```localhost:[given_port]``` and enjoy.
 
 For high quality results, please use the library versions shown in the ```requirements.txt``` file.
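Once the server is up, the demo page is not the only way to use it; it can be scripted against as well. Below is a minimal client sketch, assuming the server exposes an `/api/tts` route that takes a `text` query parameter and streams back a WAV; the actual route names live in `server/server.py` and may differ.

```python
import requests

PORT = 5002  # the port configured in server/conf.json

resp = requests.get(
    f"http://localhost:{PORT}/api/tts",  # assumed route; verify in server.py
    params={"text": "Hello, world."},
    timeout=60,
)
resp.raise_for_status()

# The response body is assumed to be raw WAV audio.
with open("tts_output.wav", "wb") as f:
    f.write(resp.content)
```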
server/conf.json
@@ -1,6 +1,6 @@
 {
-"model_path":"../models/May-22-2018_03_24PM-e6112f7",
-"model_name":"checkpoint_272976.pth.tar",
+"model_path":"/home/erogol/projects/runs/2579/keep/November-04-2018_06+19PM-TTS-master-_tmp-debug/",
+"model_name":"best_model.pth.tar",
 "model_config":"config.json",
 "port": 5002,
 "use_cuda": true
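`server.py` reads this file through `load_config`, and the attribute-style access elsewhere in the diff (`config.embedding_size`, `config.audio`) suggests the helper wraps the parsed JSON so keys are reachable as attributes. A hypothetical stand-in, not the actual `utils.generic_utils.load_config`:

```python
import json


class AttrDict(dict):
    """Dict whose keys are also readable as attributes (config.port, ...)."""

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as err:
            raise AttributeError(key) from err


def load_config(path):
    # Nested objects stay plain dicts, which is still enough for
    # keyword unpacking such as AudioProcessor(**config.audio).
    with open(path, "r") as f:
        return AttrDict(json.load(f))


if __name__ == "__main__":
    config = load_config("server/conf.json")
    print(config.model_path, config.port)
```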
server/server.py
@@ -1,7 +1,7 @@
 #!flask/bin/python
 import argparse
 from synthesizer import Synthesizer
-from TTS.utils.generic_utils import load_config
+from utils.generic_utils import load_config
 from flask import Flask, Response, request, render_template, send_file
 
 parser = argparse.ArgumentParser()
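The hunk only touches the import block, but it outlines the shape of the whole front end: parse `-c conf.json` with argparse, build a `Synthesizer`, and serve it over Flask. A self-contained sketch under those assumptions; the route name and the `Synthesizer` wiring are placeholders, not the actual server:

```python
#!flask/bin/python
# Sketch of the server's overall shape; /api/tts and the commented-out
# Synthesizer lines are placeholders for illustration only.
import argparse
import io

from flask import Flask, Response, request

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config_path", type=str, help="path to conf.json")
args = parser.parse_args()

app = Flask(__name__)
# synthesizer = Synthesizer(...)  # built from the paths in args.config_path


@app.route("/api/tts", methods=["GET"])
def tts():
    text = request.args.get("text", "")
    # data = synthesizer.tts(text)  # would return an in-memory WAV stream
    data = io.BytesIO()  # placeholder so the sketch runs on its own
    return Response(data.read(), mimetype="audio/wav")


if __name__ == "__main__":
    app.run(debug=True, port=5002)
```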
server/synthesizer.py
@@ -5,10 +5,10 @@ import torch
 import scipy
 import numpy as np
 import soundfile as sf
-from TTS.utils.text import text_to_sequence
-from TTS.utils.generic_utils import load_config
-from TTS.utils.audio import AudioProcessor
-from TTS.models.tacotron import Tacotron
+from utils.text import text_to_sequence
+from utils.generic_utils import load_config
+from utils.audio import AudioProcessor
+from models.tacotron import Tacotron
 from matplotlib import pylab as plt
 
 
@@ -22,19 +22,8 @@ class Synthesizer(object):
         config = load_config(model_config)
         self.config = config
         self.use_cuda = use_cuda
-        self.model = Tacotron(config.embedding_size, config.num_freq,
-                              config.num_mels, config.r)
-        self.ap = AudioProcessor(
-            config.sample_rate,
-            config.num_mels,
-            config.min_level_db,
-            config.frame_shift_ms,
-            config.frame_length_ms,
-            config.preemphasis,
-            config.ref_level_db,
-            config.num_freq,
-            config.power,
-            griffin_lim_iters=60)
+        self.ap = AudioProcessor(**config.audio)
+        self.model = Tacotron(config.embedding_size, self.ap.num_freq, self.ap.num_mels, config.r)
         # load model state
         if use_cuda:
             cp = torch.load(self.model_file)
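The main refactor here replaces a long positional argument list with keyword unpacking: the audio parameters now live under an `audio` object in the model's `config.json` and are splatted into `AudioProcessor` via `**config.audio`. A toy illustration of the pattern, using the parameter names from the removed lines; the class below is a hypothetical stand-in, not the repo's `AudioProcessor`, and the example values are made up:

```python
# Toy stand-in showing the **config.audio pattern.
class AudioProcessor:
    def __init__(self, sample_rate, num_mels, num_freq, min_level_db,
                 frame_shift_ms, frame_length_ms, preemphasis,
                 ref_level_db, power, griffin_lim_iters=60):
        self.sample_rate = sample_rate
        self.num_mels = num_mels
        self.num_freq = num_freq
        self.min_level_db = min_level_db
        self.frame_shift_ms = frame_shift_ms
        self.frame_length_ms = frame_length_ms
        self.preemphasis = preemphasis
        self.ref_level_db = ref_level_db
        self.power = power
        self.griffin_lim_iters = griffin_lim_iters


# With the parameters grouped under one "audio" key in config.json,
# the call site shrinks to a single unpacking expression.
audio_config = {
    "sample_rate": 22050,
    "num_mels": 80,
    "num_freq": 1025,
    "min_level_db": -100,
    "frame_shift_ms": 12.5,
    "frame_length_ms": 50,
    "preemphasis": 0.97,
    "ref_level_db": 20,
    "power": 1.5,
}
ap = AudioProcessor(**audio_config)
```

The upside of the pattern is that adding a new audio setting only touches `config.json` and the processor's signature, not every call site.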
@@ -48,9 +37,8 @@ class Synthesizer(object):
         self.model.eval()
 
     def save_wav(self, wav, path):
-        wav *= 32767 / max(1e-8, np.max(np.abs(wav)))
-        librosa.output.write_wav(path, wav.astype(np.int16),
-                                 self.config.sample_rate)
+        # wav *= 32767 / max(1e-8, np.max(np.abs(wav)))
+        self.ap.save_wav(wav, path)
 
     def tts(self, text):
         text_cleaner = [self.config.text_cleaner]
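The removed lines show what saving did before: peak-normalize the waveform into int16 range and write it with librosa. That responsibility now moves into `AudioProcessor.save_wav`. A sketch in the spirit of the removed code; the scipy writer and default rate are assumptions, not the actual implementation:

```python
import numpy as np
from scipy.io import wavfile


def save_wav(wav, path, sample_rate=22050):
    """Peak-normalize a float waveform into int16 range and write a PCM WAV."""
    wav_norm = wav * (32767 / max(1e-8, np.max(np.abs(wav))))
    wavfile.write(path, sample_rate, wav_norm.astype(np.int16))
```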
@@ -70,7 +58,6 @@ class Synthesizer(object):
                                       chars_var)
             linear_out = linear_out[0].data.cpu().numpy()
             wav = self.ap.inv_spectrogram(linear_out.T)
-            # wav = wav[:self.ap.find_endpoint(wav)]
             out = io.BytesIO()
             wavs.append(wav)
             wavs.append(np.zeros(10000))
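The surrounding `tts` loop synthesizes the input sentence by sentence, inverts each linear spectrogram back to audio, and appends 10000 samples of silence between chunks before serving the concatenation from memory. A self-contained sketch of that assembly step, with dummy waveforms standing in for the model output and an assumed 22050 Hz rate:

```python
import io

import numpy as np
from scipy.io import wavfile

# Dummy per-sentence waveforms standing in for inv_spectrogram output.
wavs = []
for sentence_wav in [np.random.randn(22050), np.random.randn(11025)]:
    wavs.append(sentence_wav)
    wavs.append(np.zeros(10000))  # silence gap between sentences, as in the diff

out = io.BytesIO()
full_wav = np.concatenate(wavs)
full_wav *= 32767 / max(1e-8, np.max(np.abs(full_wav)))
wavfile.write(out, 22050, full_wav.astype(np.int16))
out.seek(0)  # ready to stream, e.g. via flask's send_file(out, mimetype="audio/wav")
```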