From 3601e9c0727200b7819f91086848a3269ebb7b8f Mon Sep 17 00:00:00 2001
From: "Ruotian(RT) Luo"
Date: Wed, 20 Dec 2017 00:17:43 +0800
Subject: [PATCH] Fix #26. The code was using the learning_rate from
 optimizer.pth after starting self critical training.

---
 train.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/train.py b/train.py
index 674e228d..94096d00 100644
--- a/train.py
+++ b/train.py
@@ -89,9 +89,9 @@ def train(opt):
                 frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
                 decay_factor = opt.learning_rate_decay_rate ** frac
                 opt.current_lr = opt.learning_rate * decay_factor
-                utils.set_lr(optimizer, opt.current_lr) # set the decayed rate
             else:
                 opt.current_lr = opt.learning_rate
+            utils.set_lr(optimizer, opt.current_lr)
             # Assign the scheduled sampling prob
             if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
                 frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
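
For reference, a minimal standalone sketch of the learning-rate logic after this patch. It assumes set_lr does what a helper like utils.set_lr is expected to do here (write the scheduled rate into every optimizer param group); update_lr and the Namespace-based opt object are hypothetical scaffolding added for illustration, not part of the repository.

import torch
import torch.optim as optim
from argparse import Namespace

def set_lr(optimizer, lr):
    # Overwrite the LR in every param group, replacing any value that was
    # restored from a saved optimizer.pth checkpoint.
    for group in optimizer.param_groups:
        group['lr'] = lr

def update_lr(optimizer, opt, epoch):
    # Compute the scheduled LR for this epoch.
    if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
        frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
        decay_factor = opt.learning_rate_decay_rate ** frac
        opt.current_lr = opt.learning_rate * decay_factor
    else:
        opt.current_lr = opt.learning_rate
    # The point of the patch: assign the LR unconditionally, so a stale value
    # loaded from optimizer.pth cannot persist once decay has not (yet) started,
    # e.g. after switching to self critical training.
    set_lr(optimizer, opt.current_lr)

# Usage example (assumed option values for illustration only):
opt = Namespace(learning_rate=5e-4, learning_rate_decay_start=0,
                learning_rate_decay_every=3, learning_rate_decay_rate=0.8)
model = torch.nn.Linear(4, 4)
optimizer = optim.Adam(model.parameters(), lr=opt.learning_rate)
update_lr(optimizer, opt, epoch=6)
print(opt.current_lr, optimizer.param_groups[0]['lr'])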