mirror of https://github.com/MycroftAI/mimic2.git
Add preprocessor for Amy data
parent bdee55e2b9
commit 9ac364a647
datasets/amy.py (new file)
@@ -0,0 +1,49 @@
+from concurrent.futures import ProcessPoolExecutor
+from functools import partial
+import glob
+import numpy as np
+import os
+from util import audio
+
+
+def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
+  '''Preprocesses the Amy dataset from a given input path into a given output directory.'''
+  executor = ProcessPoolExecutor(max_workers=num_workers)
+  futures = []
+
+  # Read all of the .wav files:
+  paths = {}
+  for path in glob.glob(os.path.join(in_dir, 'audio', '*.wav')):
+    prompt_id = os.path.basename(path).split('-')[-2]
+    paths[prompt_id] = path
+
+  # Read the prompts file:
+  with open(os.path.join(in_dir, 'prompts.txt'), encoding='utf-8') as f:
+    for line in f:
+      parts = line.strip().split('\t')
+      if len(parts) == 3 and parts[0] in paths:
+        path = paths[parts[0]]
+        text = parts[2]
+        futures.append(executor.submit(partial(_process_utterance, out_dir, parts[0], path, text)))
+  return [future.result() for future in tqdm(futures)]
+
+
+def _process_utterance(out_dir, prompt_id, wav_path, text):
+  # Load the audio to a numpy array:
+  wav = audio.load_wav(wav_path)
+
+  # Compute the linear-scale spectrogram from the wav:
+  spectrogram = audio.spectrogram(wav).astype(np.float32)
+  n_frames = spectrogram.shape[1]
+
+  # Compute a mel-scale spectrogram from the wav:
+  mel_spectrogram = audio.melspectrogram(wav).astype(np.float32)
+
+  # Write the spectrograms to disk:
+  spectrogram_filename = 'amy-spec-%s.npy' % prompt_id
+  mel_filename = 'amy-mel-%s.npy' % prompt_id
+  np.save(os.path.join(out_dir, spectrogram_filename), spectrogram.T, allow_pickle=False)
+  np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
+
+  # Return a tuple describing this training example:
+  return (spectrogram_filename, mel_filename, n_frames, text)
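The layout build_from_path expects can be read off the code: wavs live under <in_dir>/audio, the prompt id is the second-to-last '-'-separated token of each wav filename, and prompts.txt is tab-separated with the prompt id in the first field and the transcript in the third. A minimal sketch of calling the new module directly; all paths and example filenames below are illustrative, not part of the commit:

import os
from datasets import amy

# Hypothetical layout, inferred from the globbing and parsing above:
#   <in_dir>/prompts.txt              tab-separated, e.g. "p0001\t<extra-field>\tHello world."
#   <in_dir>/audio/amy-p0001-16k.wav  -> basename.split('-')[-2] == 'p0001'
in_dir = os.path.expanduser('~/tacotron/amy')        # illustrative paths
out_dir = os.path.expanduser('~/tacotron/training')
os.makedirs(out_dir, exist_ok=True)                  # np.save expects the directory to exist

metadata = amy.build_from_path(in_dir, out_dir, num_workers=4)

# Each entry is the tuple returned by _process_utterance:
for spec_file, mel_file, n_frames, text in metadata:
  print(spec_file, mel_file, n_frames, text)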
preprocess.py
@@ -2,7 +2,7 @@ import argparse
 import os
 from multiprocessing import cpu_count
 from tqdm import tqdm
-from datasets import blizzard, ljspeech
+from datasets import amy, blizzard, ljspeech
 from hparams import hparams


@@ -22,6 +22,14 @@ def preprocess_ljspeech(args):
   write_metadata(metadata, out_dir)


+def preprocess_amy(args):
+  in_dir = os.path.join(args.base_dir, 'amy')
+  out_dir = os.path.join(args.base_dir, args.output)
+  os.makedirs(out_dir, exist_ok=True)
+  metadata = amy.build_from_path(in_dir, out_dir, args.num_workers, tqdm=tqdm)
+  write_metadata(metadata, out_dir)
+
+
 def write_metadata(metadata, out_dir):
   with open(os.path.join(out_dir, 'train.txt'), 'w', encoding='utf-8') as f:
     for m in metadata:
@@ -37,10 +45,12 @@ def main():
   parser = argparse.ArgumentParser()
   parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
   parser.add_argument('--output', default='training')
-  parser.add_argument('--dataset', required=True, choices=['blizzard', 'ljspeech'])
+  parser.add_argument('--dataset', required=True, choices=['amy', 'blizzard', 'ljspeech'])
   parser.add_argument('--num_workers', type=int, default=cpu_count())
   args = parser.parse_args()
-  if args.dataset == 'blizzard':
+  if args.dataset == 'amy':
+    preprocess_amy(args)
+  elif args.dataset == 'blizzard':
     preprocess_blizzard(args)
   elif args.dataset == 'ljspeech':
     preprocess_ljspeech(args)
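Assuming the file patched above is the repository's top-level preprocess.py, the new dataset then runs through the same entry point as the existing ones; --base_dir and --output fall back to the defaults shown in the diff:

python preprocess.py --dataset amy --base_dir ~/tacotron --output training

Each utterance ends up as an amy-spec-<prompt_id>.npy / amy-mel-<prompt_id>.npy pair in the output directory, and write_metadata records one line per utterance in train.txt there.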