diff --git a/layers/attention.py b/layers/attention.py
index 5436f110..48e757a5 100644
--- a/layers/attention.py
+++ b/layers/attention.py
@@ -1,7 +1,7 @@
 import torch
 from torch import nn
 from torch.nn import functional as F
-from utils.generic_utils import sequence_mask
+from TTS.utils.generic_utils import sequence_mask
 
 
 class BahdanauAttention(nn.Module):
diff --git a/models/tacotron.py b/models/tacotron.py
index 8a215b90..0606f7e6 100644
--- a/models/tacotron.py
+++ b/models/tacotron.py
@@ -1,7 +1,7 @@
 # coding: utf-8
 import torch
 from torch import nn
-from utils.text.symbols import symbols
+from TTS.utils.text.symbols import symbols
 
 from layers.tacotron import Prenet, Encoder, Decoder, PostCBHG
 
diff --git a/setup.py b/setup.py
index 1d091d2b..72b12f7e 100644
--- a/setup.py
+++ b/setup.py
@@ -72,7 +72,7 @@ setup(
     },
     setup_requires=["numpy==1.14.3"],
     install_requires=[
-        "scipy==0.19.0",
+        "scipy >=0.19.0",
         "torch >= 0.4.1",
         "librosa==0.5.1",
         "unidecode==0.4.20",
diff --git a/utils/generic_utils.py b/utils/generic_utils.py
index c56c3edf..8f33fb42 100644
--- a/utils/generic_utils.py
+++ b/utils/generic_utils.py
@@ -11,7 +11,7 @@ import subprocess
 import numpy as np
 from collections import OrderedDict
 from torch.autograd import Variable
-from utils.text import text_to_sequence
+from TTS.utils.text import text_to_sequence
 
 
 class AttrDict(dict):
diff --git a/utils/text/__init__.py b/utils/text/__init__.py
index 37716fa9..4cf4ca98 100644
--- a/utils/text/__init__.py
+++ b/utils/text/__init__.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 
 import re
-from utils.text import cleaners
-from utils.text.symbols import symbols
+from TTS.utils.text import cleaners
+from TTS.utils.text.symbols import symbols
 
 # Mappings from symbol to numeric ID and vice versa:
 _symbol_to_id = {s: i for i, s in enumerate(symbols)}
diff --git a/utils/text/symbols.py b/utils/text/symbols.py
index 4c8f6c43..e55074f3 100644
--- a/utils/text/symbols.py
+++ b/utils/text/symbols.py
@@ -5,7 +5,7 @@ Defines the set of symbols used in text input to the model.
 The default is a set of ASCII characters that works well for English or text that has been run
 through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
 '''
-from utils.text import cmudict
+from TTS.utils.text import cmudict
 
 _pad = '_'
 _eos = '~'