diff --git a/layers/attention.py b/layers/attention.py
index 48e757a5..5436f110 100644
--- a/layers/attention.py
+++ b/layers/attention.py
@@ -1,7 +1,7 @@
 import torch
 from torch import nn
 from torch.nn import functional as F
-from TTS.utils.generic_utils import sequence_mask
+from utils.generic_utils import sequence_mask


 class BahdanauAttention(nn.Module):
diff --git a/models/tacotron.py b/models/tacotron.py
index 0606f7e6..8a215b90 100644
--- a/models/tacotron.py
+++ b/models/tacotron.py
@@ -1,7 +1,7 @@
 # coding: utf-8
 import torch
 from torch import nn
-from TTS.utils.text.symbols import symbols
+from utils.text.symbols import symbols

 from layers.tacotron import Prenet, Encoder, Decoder, PostCBHG
diff --git a/utils/text/__init__.py b/utils/text/__init__.py
index 4cf4ca98..37716fa9 100644
--- a/utils/text/__init__.py
+++ b/utils/text/__init__.py
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 import re
-from TTS.utils.text import cleaners
-from TTS.utils.text.symbols import symbols
+from utils.text import cleaners
+from utils.text.symbols import symbols

 # Mappings from symbol to numeric ID and vice versa:
 _symbol_to_id = {s: i for i, s in enumerate(symbols)}