
Commit

adding current lr to stats for monitoring
Signed-off-by: Shakti Kumar <[email protected]>
shaktikshri committed Mar 30, 2020
1 parent 4f1da87 commit 9b293c9
Showing 1 changed file with 4 additions and 3 deletions.
train.py (7 changes: 4 additions & 3 deletions)

@@ -382,7 +382,8 @@ def learn(
         "baseline_loss": 0,
         "entropy_loss": 0,
         "num_unpadded_steps": 0,
-        "len_max_traj": 0
+        "len_max_traj": 0,
+        "learning_rate":optimizer.param_groups[0]['lr']
     }

     logging.debug('AT LEARN')
@@ -755,6 +756,7 @@ def lr_lambda(epoch):
         "pg_loss",
         "baseline_loss",
         "entropy_loss",
+        "learning_rate",
     ]
     logger.info("# Step\t%s", "\t".join(stat_keys))

@@ -905,13 +907,12 @@ def checkpoint():
             # best_val_loss = val_loss

             logging.info(
-                "Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s\n%s",
+                "Steps %i @ %.1f SPS. Loss %f. %sStats:\n%s",
                 step,
                 sps,
                 total_loss,
                 mean_return,
                 pprint.pformat(stats),
-                optimizer.params_group[0]['lr']
             )
         except KeyboardInterrupt:
             return  # Try joining actors then quit.
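
For context, `optimizer.param_groups[0]['lr']` is how PyTorch exposes the learning rate currently in effect for the first parameter group, so storing it in the `stats` dict makes it appear wherever the stats are logged. Note that the argument removed from the `logging.info` call referenced `optimizer.params_group`, which is not a valid attribute (the attribute is `param_groups`), so moving the value into `stats` also removes a broken lookup. Below is a minimal, self-contained sketch of the pattern, not code from this repository: the model, optimizer, and `LambdaLR` schedule are invented for illustration.

# Sketch: logging the live learning rate alongside other training stats.
# The model, schedule, and stats dict here are illustrative only.
import pprint

import torch

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
# LambdaLR multiplies the base lr by the value returned for each step.
scheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lr_lambda=lambda step: 0.95 ** step
)

for step in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(4, 10)).mean()
    loss.backward()
    optimizer.step()
    scheduler.step()

    stats = {
        "total_loss": loss.item(),
        # param_groups[0]['lr'] holds the rate the scheduler just set.
        "learning_rate": optimizer.param_groups[0]["lr"],
    }
    print(pprint.pformat(stats))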
