pg_mc.py
import os

import click
import gym
import gym.wrappers
import gym_minigrid
import numpy as np
import pybulletgym
import torch
import torch.nn as nn
from all_the_tools.config import load_config
from all_the_tools.metrics import Mean, Last, FPS
from all_the_tools.torch.utils import seed_torch
from tensorboardX import SummaryWriter
from tqdm import tqdm

import utils
import wrappers
import wrappers.torch
from algo.common import build_env
from algo.common import build_optimizer
from history import History
from model import Model
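# referencing these modules marks the imports as used; importing pybulletgym and
# gym_minigrid registers their environments with gym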
pybulletgym
gym_minigrid

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# TODO: train/eval
# TODO: return normalization


@click.command()
@click.option("--config-path", type=click.Path(), required=True)
@click.option("--experiment-path", type=click.Path(), required=True)
@click.option("--restore-path", type=click.Path())
@click.option("--render", is_flag=True)
def main(config_path, **kwargs):
    config = load_config(config_path, **kwargs)
    del config_path, kwargs

    writer = SummaryWriter(config.experiment_path)
    seed_torch(config.seed)

    env = wrappers.Batch(build_env(config))
    if config.render:
        env = wrappers.TensorboardBatchMonitor(env, writer, config.log_interval)
    env = wrappers.torch.Torch(env, device=DEVICE)
    env.seed(config.seed)
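    # build the policy model and optionally restore it from a checkpoint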
    model = Model(config.model, env.observation_space, env.action_space)
    model = model.to(DEVICE)
    if config.restore_path is not None:
        model.load_state_dict(torch.load(config.restore_path))

    optimizer = build_optimizer(config.opt, model.parameters())
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.episodes)
    metrics = {
        "loss": Mean(),
        "lr": Last(),
        "eps": FPS(),
        "ep/length": Mean(),
        "ep/return": Mean(),
        "rollout/reward": Mean(),
        "rollout/advantage": Mean(),
        "rollout/entropy": Mean(),
    }

    # training loop ================================================================================================
    for episode in tqdm(range(config.episodes), desc="training"):
        hist = History()
        s = env.reset()
        h = model.zero_state(1)
        d = torch.ones(1, dtype=torch.bool)

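        # roll out one full episode with the current policy (no gradient tracking)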
        model.eval()
        with torch.no_grad():
            while True:
                trans = hist.append_transition()
                trans.record(state=s, hidden=h, done=d)

                a, _, h = model(s, h, d)
                a = a.sample()
                s, r, d, info = env.step(a)
                trans.record(action=a, reward=r)

                if d:
                    break
        # optimization =============================================================================================
        model.train()

        # build rollout
        rollout = hist.full_rollout()

        # loss
        loss = compute_loss(env, model, rollout, metrics, config)

        # metrics
        metrics["loss"].update(loss.data.cpu().numpy())
        metrics["lr"].update(np.squeeze(scheduler.get_last_lr()))

        # training
        optimizer.zero_grad()
        loss.mean().backward()
        if config.grad_clip_norm is not None:
            nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip_norm)
        optimizer.step()
        scheduler.step()

        metrics["eps"].update(1)
        metrics["ep/length"].update(info[0]["episode"]["l"])
        metrics["ep/return"].update(info[0]["episode"]["r"])

        if episode % config.log_interval == 0 and episode > 0:
            for k in metrics:
                writer.add_scalar(k, metrics[k].compute_and_reset(), global_step=episode)

            torch.save(
                model.state_dict(),
                os.path.join(config.experiment_path, "model_{}.pth".format(episode)),
            )


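# REINFORCE loss: negative log-probability of the taken actions weighted by the
# (optionally normalized) discounted Monte Carlo returns, plus an entropy bonus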
def compute_loss(env, model, rollout, metrics, config):
    dist, _, _ = model(rollout.state, rollout.hidden[:, 0], rollout.done)
    returns = utils.total_discounted_return(rollout.reward, gamma=config.gamma)

    # actor
    advantages = returns.detach()
    if config.adv_norm:
        advantages = utils.normalize(advantages)

    log_prob = dist.log_prob(rollout.action)
    entropy = dist.entropy()
    if isinstance(env.action_space, gym.spaces.Box):
        log_prob = log_prob.sum(-1)
        entropy = entropy.sum(-1)

    actor_loss = -log_prob * advantages + config.entropy_weight * -entropy

    # loss
    loss = actor_loss.mean(1)

    # metrics
    metrics["rollout/reward"].update(rollout.reward.data.cpu().numpy())
    metrics["rollout/advantage"].update(advantages.data.cpu().numpy())
    metrics["rollout/entropy"].update(entropy.data.cpu().numpy())

    return loss


if __name__ == "__main__":
    main()