mirror of https://github.com/coqui-ai/TTS.git
Fix Capacitron training (#2086)
parent 5ccef6e665
commit 5307a2229b
@@ -344,7 +344,7 @@ class BaseTTS(BaseTrainerModel):
             loader = DataLoader(
                 dataset,
                 batch_size=config.eval_batch_size if is_eval else config.batch_size,
-                shuffle=False,  # shuffle is done in the dataset.
+                shuffle=True,  # if there is no other sampler
                 collate_fn=dataset.collate_fn,
                 drop_last=False,  # setting this False might cause issues in AMP training.
                 sampler=sampler,
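The only functional change in this hunk is the shuffle flag: batches are now shuffled by the DataLoader itself instead of inside the dataset. Note that PyTorch only honors shuffle=True when no explicit sampler is supplied (DataLoader then builds its own RandomSampler); passing both raises a ValueError, which is what the new comment "if there is no other sampler" alludes to. A minimal sketch of that constraint, using a toy TensorDataset as a stand-in for the real TTS dataset:

import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

dataset = TensorDataset(torch.arange(10).unsqueeze(1))  # stand-in for the TTS dataset

# Legal: with sampler=None, shuffle=True makes DataLoader build its own RandomSampler.
loader = DataLoader(dataset, batch_size=2, shuffle=True, sampler=None)

# Illegal: an explicit sampler is mutually exclusive with shuffle=True.
try:
    DataLoader(dataset, batch_size=2, shuffle=True, sampler=RandomSampler(dataset))
except ValueError as err:
    print(err)  # PyTorch reports that the sampler option is mutually exclusive with shuffle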
@@ -38,9 +38,9 @@ class CapacitronOptimizer:
         self.param_groups = self.primary_optimizer.param_groups
         self.primary_optimizer.step()
 
-    def zero_grad(self):
-        self.primary_optimizer.zero_grad()
-        self.secondary_optimizer.zero_grad()
+    def zero_grad(self, set_to_none=False):
+        self.primary_optimizer.zero_grad(set_to_none)
+        self.secondary_optimizer.zero_grad(set_to_none)
 
     def load_state_dict(self, state_dict):
         self.primary_optimizer.load_state_dict(state_dict[0])
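This hunk forwards PyTorch's set_to_none argument through the wrapper. Optimizer.zero_grad(set_to_none=...) either zero-fills the .grad tensors or replaces them with None (saving memory and a fill kernel); before this fix, any trainer calling zero_grad(set_to_none=True) on the wrapper would hit a TypeError. A simplified sketch of a two-optimizer wrapper in the spirit of CapacitronOptimizer; everything beyond the lines visible in the diff (constructor, step ordering, the secondary state index) is assumed for illustration:

import torch

class DualOptimizer:
    """Sketch of a wrapper around two optimizers, as CapacitronOptimizer
    wraps its primary and secondary optimizers. Not the real implementation."""

    def __init__(self, primary: torch.optim.Optimizer, secondary: torch.optim.Optimizer):
        self.primary_optimizer = primary
        self.secondary_optimizer = secondary
        self.param_groups = self.primary_optimizer.param_groups

    def step(self):
        # Mirror the diff's context lines: re-expose the primary optimizer's
        # param_groups (so schedulers see current values), then step it.
        self.param_groups = self.primary_optimizer.param_groups
        self.primary_optimizer.step()

    def zero_grad(self, set_to_none=False):
        # Forward the flag to both wrapped optimizers so a call like
        # optimizer.zero_grad(set_to_none=True) no longer raises TypeError.
        self.primary_optimizer.zero_grad(set_to_none)
        self.secondary_optimizer.zero_grad(set_to_none)

    def load_state_dict(self, state_dict):
        # The diff's context line implies checkpoints store the two optimizer
        # states as a pair; index 1 for the secondary is an assumption here.
        self.primary_optimizer.load_state_dict(state_dict[0])
        self.secondary_optimizer.load_state_dict(state_dict[1])

With the flag forwarded, a training loop that calls zero_grad(set_to_none=True) (the PyTorch default behavior from 2.0 onward) works against the wrapper unchanged.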