mirror of https://github.com/MycroftAI/mimic2.git
Replace Amy with Danny data
parent c7efb3c208
commit 59f51f7197
@@ -40,6 +40,9 @@ def _process_utterance(out_dir, prompt_id, wav_path, text):
   wav = wav[margin:-margin]
   wav, _ = librosa.effects.trim(wav, top_db=40, frame_length=1024, hop_length=256)

+  # Normalize
+  wav /= max(0.01, np.max(np.abs(wav)))
+
   # Compute the linear-scale spectrogram from the wav:
   spectrogram = audio.spectrogram(wav).astype(np.float32)
   n_frames = spectrogram.shape[1]
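The added lines peak-normalize the trimmed waveform so every utterance reaches the same maximum amplitude before spectrogram extraction. The max(0.01, ...) guard bounds the divisor, so an all-zero or near-silent clip neither triggers a division by zero nor gets amplified into pure noise. A minimal standalone sketch of the same operation (the test signal below is hypothetical, not from the corpus):

import numpy as np

def peak_normalize(wav, floor=0.01):
  # Scale so the loudest sample sits at 1.0; the floor keeps silence
  # from being blown up and protects against dividing by zero.
  return wav / max(floor, np.max(np.abs(wav)))

# Hypothetical example: one second of a quiet 440 Hz tone peaking at 0.2
t = np.linspace(0, 1, 16000, endpoint=False)
quiet = 0.2 * np.sin(2 * np.pi * 440 * t)
assert np.isclose(np.max(np.abs(peak_normalize(quiet))), 1.0)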
@@ -48,8 +51,8 @@ def _process_utterance(out_dir, prompt_id, wav_path, text):
   mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)

   # Write the spectrograms to disk:
-  spectrogram_filename = 'amy-spec-%s.npy' % prompt_id
-  mel_filename = 'amy-mel-%s.npy' % prompt_id
+  spectrogram_filename = 'danny-spec-%s.npy' % prompt_id
+  mel_filename = 'danny-mel-%s.npy' % prompt_id
   np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
   np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)

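This hunk only renames the output files from the amy-* prefix to danny-*; the save path itself is unchanged. Both arrays are written transposed, so the .npy files on disk are time-major, and allow_pickle=False keeps them plain numeric data. A small round-trip sketch under assumed shapes (the 1025 linear bins and 120 frames are illustrative values, not taken from the commit):

import os
import numpy as np

out_dir, prompt_id = '/tmp/training', '0001'  # hypothetical paths
os.makedirs(out_dir, exist_ok=True)

# Assumed in-memory convention: (frequency_bins, frames)
spectrogram = np.random.rand(1025, 120).astype(np.float32)

spectrogram_filename = 'danny-spec-%s.npy' % prompt_id
np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T,
        allow_pickle=False)

# Consumers load a time-major (frames, bins) array back
loaded = np.load(os.path.join(out_dir, spectrogram_filename))
assert loaded.shape == (120, 1025)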
@@ -2,7 +2,7 @@ import argparse
 import os
 from multiprocessing import cpu_count
 from tqdm import tqdm
-from datasets import amy, blizzard, ljspeech
+from datasets import danny, blizzard, ljspeech
 from hparams import hparams


@@ -22,11 +22,11 @@ def preprocess_ljspeech(args):
   write_metadata(metadata, out_dir)


-def preprocess_amy(args):
-  in_dir = os.path.join(args.base_dir, 'amy')
+def preprocess_danny(args):
+  in_dir = os.path.join(args.base_dir, 'danny')
   out_dir = os.path.join(args.base_dir, args.output)
   os.makedirs(out_dir, exist_ok=True)
-  metadata = amy.build_from_path(in_dir, out_dir, args.num_workers, tqdm=tqdm)
+  metadata = danny.build_from_path(in_dir, out_dir, args.num_workers, tqdm=tqdm)
   write_metadata(metadata, out_dir)


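For datasets.danny to be a drop-in here, its build_from_path presumably mirrors the interface of the other dataset modules: scan in_dir, fan one _process_utterance job per prompt out to a worker pool, and return the metadata rows that write_metadata serializes. A hedged sketch of that shape, assuming paired <id>.wav / <id>.txt files in in_dir (the real corpus layout may differ) and the module-level _process_utterance shown in the first hunk:

import os
from concurrent.futures import ProcessPoolExecutor
from functools import partial

def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
  executor = ProcessPoolExecutor(max_workers=num_workers)
  futures = []
  for fname in sorted(os.listdir(in_dir)):
    if not fname.endswith('.wav'):
      continue
    prompt_id = os.path.splitext(fname)[0]
    wav_path = os.path.join(in_dir, fname)
    # Assumed layout: the transcript sits next to the wav as <id>.txt
    with open(os.path.join(in_dir, prompt_id + '.txt')) as f:
      text = f.read().strip()
    futures.append(executor.submit(
        partial(_process_utterance, out_dir, prompt_id, wav_path, text)))
  return [future.result() for future in tqdm(futures)]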
@@ -45,11 +45,11 @@ def main():
   parser = argparse.ArgumentParser()
   parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
   parser.add_argument('--output', default='training')
-  parser.add_argument('--dataset', required=True, choices=['amy', 'blizzard', 'ljspeech'])
+  parser.add_argument('--dataset', required=True, choices=['danny', 'blizzard', 'ljspeech'])
   parser.add_argument('--num_workers', type=int, default=cpu_count())
   args = parser.parse_args()
-  if args.dataset == 'amy':
-    preprocess_amy(args)
+  if args.dataset == 'danny':
+    preprocess_danny(args)
   elif args.dataset == 'blizzard':
     preprocess_blizzard(args)
   elif args.dataset == 'ljspeech':
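With the choices list and the dispatch updated, the new corpus is preprocessed the same way as the others, e.g.:

python preprocess.py --dataset danny

Given the defaults above, that reads wavs from ~/tacotron/danny and writes danny-spec-*.npy and danny-mel-*.npy plus the metadata file into ~/tacotron/training. Note that --dataset amy now fails argparse's choices check, so any scripts still invoking the old name need updating.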