Fix recipes as to the recent API changes. (#1367)

* Fix recipes -> #1366

* Fix trainer docs
pull/1370/head
Eren Gölge 2022-03-10 11:36:38 +01:00 committed by GitHub
parent d792b78703
commit 48f6bb405a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 9 additions and 28 deletions

View File

@ -1,17 +1,3 @@
# Trainer API
The {class}`TTS.trainer.Trainer` provides a lightweight, extensible, and feature-complete training run-time. We optimized it for 🐸 but
it can also be used for any DL training in different domains. It supports distributed multi-GPU, mixed-precision (apex or torch.amp) training.
## Trainer
```{eval-rst}
.. autoclass:: TTS.trainer.Trainer
:members:
```
## TrainingArgs
```{eval-rst}
.. autoclass:: TTS.trainer.TrainingArgs
:members:
```
We made the trainer a separate project at https://github.com/coqui-ai/Trainer

View File

@ -37,7 +37,7 @@ ap = AudioProcessor(**config.audio.to_dict())
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
# init model
model = GAN(config)
model = GAN(config, ap)
# init the trainer and 🚀
trainer = Trainer(
@ -46,7 +46,6 @@ trainer = Trainer(
output_path,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
training_assets={"audio_processor": ap},
eval_samples=eval_samples
)
trainer.fit()

View File

@ -37,7 +37,7 @@ ap = AudioProcessor(**config.audio.to_dict())
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
# init model
model = GAN(config)
model = GAN(config, ap)
# init the trainer and 🚀
trainer = Trainer(
@ -46,7 +46,6 @@ trainer = Trainer(
output_path,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
training_assets={"audio_processor": ap},
eval_samples=eval_samples
)
trainer.fit()

View File

@ -89,7 +89,6 @@ trainer = Trainer(
output_path,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
training_assets={"audio_processor": ap},
eval_samples=eval_samples
)
trainer.fit()

View File

@ -36,7 +36,7 @@ ap = AudioProcessor(**config.audio.to_dict())
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)
# init model
model = GAN(config)
model = GAN(config, ap)
# init the trainer and 🚀
trainer = Trainer(
@ -45,7 +45,6 @@ trainer = Trainer(
output_path,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
training_assets={"audio_processor": ap},
eval_samples=eval_samples
)
trainer.fit()

View File

@ -136,7 +136,6 @@ trainer = Trainer(
output_path,
model=model,
train_samples=train_samples,
eval_samples=eval_samples,
training_assets={"audio_processor": ap},
eval_samples=eval_samples
)
trainer.fit()