From 55d9209221862d21c6b55869fe20522d9a103b6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eren=20G=C3=B6lge?= Date: Thu, 30 Sep 2021 14:58:26 +0000 Subject: [PATCH] Remove STT tokenizer --- TTS/trainer.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/TTS/trainer.py b/TTS/trainer.py index d75b8e14..006a702b 100644 --- a/TTS/trainer.py +++ b/TTS/trainer.py @@ -19,7 +19,6 @@ from torch import nn from torch.nn.parallel import DistributedDataParallel as DDP_th from torch.utils.data import DataLoader -from TTS.stt.datasets.tokenizer import Tokenizer from TTS.utils.callbacks import TrainerCallback from TTS.utils.distribute import init_distributed from TTS.utils.generic_utils import ( @@ -103,7 +102,6 @@ class Trainer: get_data_samples: Callable = None, train_samples: List = None, eval_samples: List = None, - tokenizer: Tokenizer = None, cudnn_benchmark: bool = False, training_assets: Dict = {}, parse_command_line_args: bool = True, @@ -237,9 +235,6 @@ class Trainer: self.use_apex = self._is_apex_available() self.use_amp_scaler = self.config.mixed_precision and self.use_cuda - # init tokenizer - self.tokenizer = tokenizer - # load data samples if train_samples is None and get_data_samples is None: raise ValueError("[!] `train_samples` and `get_data_samples` cannot both be None.")