mirror of https://github.com/coqui-ai/TTS.git
optional lr schedule
parent 0b6a9995fc
commit bb04a1c6e0
config.json (10 changes)
@@ -27,11 +27,15 @@
     "embedding_size": 256,
     "text_cleaner": "english_cleaners",
     "epochs": 1000,
-    "lr": 0.0015,
+    // "lr": 0.0015,
+    "lr": 0.0001,
+    "lr_decay": false,
+    "warmup_steps": 4000,
-    "batch_size":32,
+    "batch_size": 32,
     "eval_batch_size":32,
-    "r": 1,
+    "r": 5,
+    "wd": 0.000001,
     "checkpoint": true,
     "save_step": 5000,
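For context: the schedule itself is configured here but not defined in this diff. "lr_decay" gates whether train.py steps the scheduler at all, and "warmup_steps" parameterizes the schedule; a warmup count of 4000 is characteristic of a Noam-style ramp (linear warmup, then inverse-square-root decay). Below is a minimal sketch of such a schedule built on PyTorch's stock LambdaLR, assuming nothing about the repo's actual scheduler class; noam_lambda and the stand-in model are illustrative only.

import torch

def noam_lambda(warmup_steps):
    # Linear warmup for `warmup_steps` steps, then inverse-sqrt decay,
    # normalized so the multiplier peaks at 1.0 exactly at `warmup_steps`.
    def factor(step):
        step = max(step, 1)  # guard against 0 ** -0.5 on the first call
        return warmup_steps ** 0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
    return factor

model = torch.nn.Linear(256, 256)  # stand-in for the TTS model
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # "lr" from config.json
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, noam_lambda(4000))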
train.py (4 changes)
@@ -58,7 +58,8 @@ def train(model, criterion, criterion_st, data_loader, optimizer, optimizer_st,
             epoch * len(data_loader) + 1

         # setup lr
-        scheduler.step()
+        if c.lr_decay:
+            scheduler.step()
         optimizer.zero_grad()
         optimizer_st.zero_grad()

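Note the placement: this check sits inside the per-batch loop (the surrounding code computes the running step from epoch * len(data_loader)), so when "lr_decay" is true the schedule advances once per iteration, which is what a step-based warmup schedule expects. With the new default of "lr_decay": false, scheduler.step() is never called and training runs at the constant base "lr" of 0.0001.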
@@ -92,6 +93,7 @@ def train(model, criterion, criterion_st, data_loader, optimizer, optimizer_st,

         # backpass and check the grad norm for spec losses
         loss.backward(retain_graph=True)
+        # custom weight decay
         for group in optimizer.param_groups:
             for param in group['params']:
                 param.data = param.data.add(-c.wd * group['lr'], param.data)
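The decay line uses the old two-argument Tensor.add(value, other) form, which computes param + value * other; with value = -c.wd * group['lr'] and other = param.data it shrinks each weight to (1 - wd * lr) * param. That is weight decay applied directly to the parameters rather than folded into the gradient — the "decoupled" formulation later popularized by AdamW. The two-argument overload is gone in modern PyTorch, so a present-day restatement would look like the sketch below; apply_weight_decay is a hypothetical helper name, and the SGD/Linear setup is only there to make the snippet runnable.

import torch

def apply_weight_decay(optimizer, wd):
    # Decoupled weight decay: scale every parameter by (1 - wd * lr),
    # independent of its gradient.
    for group in optimizer.param_groups:
        for param in group['params']:
            param.data.mul_(1.0 - wd * group['lr'])

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)  # "lr" from config.json
loss = model(torch.randn(2, 4)).sum()
loss.backward()
apply_weight_decay(optimizer, wd=1e-6)  # "wd" from config.json
optimizer.step()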