import os
import pathlib
import warnings
from copy import deepcopy

import gym
import numpy as np
import pytest
import torch as th

from stable_baselines3 import DDPG, DQN, SAC, TD3, HerReplayBuffer
from stable_baselines3.common.envs import BitFlippingEnv
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.her.goal_selection_strategy import GoalSelectionStrategy
from stable_baselines3.her.her_replay_buffer import get_time_limit


def test_import_error():
    with pytest.raises(ImportError) as excinfo:
        from stable_baselines3 import HER

        HER("MlpPolicy")
    assert "documentation" in str(excinfo.value)


@pytest.mark.parametrize("model_class", [SAC, TD3, DDPG, DQN])
@pytest.mark.parametrize("online_sampling", [True, False])
@pytest.mark.parametrize("image_obs_space", [True, False])
def test_her(model_class, online_sampling, image_obs_space):
    """
    Test Hindsight Experience Replay.
    """
    n_bits = 4
    env = BitFlippingEnv(
        n_bits=n_bits,
        continuous=not (model_class == DQN),
        image_obs_space=image_obs_space,
    )

    model = model_class(
        "MultiInputPolicy",
        env,
        replay_buffer_class=HerReplayBuffer,
        replay_buffer_kwargs=dict(
            n_sampled_goal=2,
            goal_selection_strategy="future",
            online_sampling=online_sampling,
            max_episode_length=n_bits,
        ),
        train_freq=4,
        gradient_steps=1,
        policy_kwargs=dict(net_arch=[64]),
        learning_starts=100,
        buffer_size=int(2e4),
    )

    model.learn(total_timesteps=150)
    evaluate_policy(model, Monitor(env))
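

# Illustrative sketch (not exercised by the test suite): the core idea behind
# HerReplayBuffer is to relabel stored transitions with goals that were actually
# achieved, so that otherwise-failed episodes still yield informative rewards.
# This helper is a simplified approximation of that relabelling step and is never
# called by the tests.
def _her_relabelling_sketch():
    env = BitFlippingEnv(n_bits=4, continuous=True)
    env.reset()
    action = env.action_space.sample()
    next_obs, _, _, info = env.step(action)
    # Hindsight: pretend the goal we actually reached was the desired goal all along
    # (this mirrors the "final"/"future" strategies in spirit).
    new_goal = next_obs["achieved_goal"]
    # Recompute the reward for the relabelled transition; for the sparse-reward
    # BitFlippingEnv this should be 0.0 because achieved and desired goals match.
    new_reward = env.compute_reward(next_obs["achieved_goal"], new_goal, info)
    return new_reward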


@pytest.mark.parametrize(
    "goal_selection_strategy",
    [
        "final",
        "episode",
        "future",
        GoalSelectionStrategy.FINAL,
        GoalSelectionStrategy.EPISODE,
        GoalSelectionStrategy.FUTURE,
    ],
)
@pytest.mark.parametrize("online_sampling", [True, False])
def test_goal_selection_strategy(goal_selection_strategy, online_sampling):
    """
    Test different goal selection strategies.
    """
    env = BitFlippingEnv(continuous=True)

    normal_action_noise = NormalActionNoise(np.zeros(1), 0.1 * np.ones(1))

    model = SAC(
        "MultiInputPolicy",
        env,
        replay_buffer_class=HerReplayBuffer,
        replay_buffer_kwargs=dict(
            goal_selection_strategy=goal_selection_strategy,
            online_sampling=online_sampling,
            max_episode_length=10,
            n_sampled_goal=2,
        ),
        train_freq=4,
        gradient_steps=1,
        policy_kwargs=dict(net_arch=[64]),
        learning_starts=100,
        buffer_size=int(1e5),
        action_noise=normal_action_noise,
    )
    assert model.action_noise is not None
    model.learn(total_timesteps=150)
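

# For reference, the three goal selection strategies exercised above differ in which
# achieved goal is used for relabelling a transition:
#   - "final":   the goal achieved at the end of the episode,
#   - "episode": a goal achieved at a random step of the same episode,
#   - "future":  a goal achieved at a random step after the current transition.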


@pytest.mark.parametrize("model_class", [SAC, TD3, DDPG, DQN])
@pytest.mark.parametrize("use_sde", [False, True])
@pytest.mark.parametrize("online_sampling", [False, True])
def test_save_load(tmp_path, model_class, use_sde, online_sampling):
    """
    Test that 'save' and 'load' save and load the model correctly.
    """
    if use_sde and model_class != SAC:
        pytest.skip("Only SAC has gSDE support")

    n_bits = 4
    env = BitFlippingEnv(n_bits=n_bits, continuous=not (model_class == DQN))

    kwargs = dict(use_sde=True) if use_sde else {}

    # create model
    model = model_class(
        "MultiInputPolicy",
        env,
        replay_buffer_class=HerReplayBuffer,
        replay_buffer_kwargs=dict(
            n_sampled_goal=2,
            goal_selection_strategy="future",
            online_sampling=online_sampling,
            max_episode_length=n_bits,
        ),
        verbose=0,
        tau=0.05,
        batch_size=128,
        learning_rate=0.001,
        policy_kwargs=dict(net_arch=[64]),
        buffer_size=int(1e5),
        gamma=0.98,
        gradient_steps=1,
        train_freq=4,
        learning_starts=100,
        **kwargs,
    )

    model.learn(total_timesteps=150)

    # Collect a batch of dictionary observations from random steps
    obs = env.reset()
    observations = {key: [] for key in obs.keys()}
    for _ in range(10):
        obs = env.step(env.action_space.sample())[0]
        for key in obs.keys():
            observations[key].append(obs[key])
    observations = {key: np.array(obs) for key, obs in observations.items()}

    # Get dictionary of current parameters
    params = deepcopy(model.policy.state_dict())

    # Modify all parameters to be random values
    random_params = dict((param_name, th.rand_like(param)) for param_name, param in params.items())

    # Update model parameters with the new random values
    model.policy.load_state_dict(random_params)

    new_params = model.policy.state_dict()
    # Check that all params are different now
    for k in params:
        assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected."

    params = new_params

    # get selected actions
    selected_actions, _ = model.predict(observations, deterministic=True)

    # Save the model to disk
    model.save(tmp_path / "test_save.zip")
    del model

    # Load with custom objects: existing attributes are overridden, unknown keys are ignored
    custom_objects = dict(learning_rate=2e-5, dummy=1.0)
    model_ = model_class.load(str(tmp_path / "test_save.zip"), env=env, custom_objects=custom_objects, verbose=2)
    assert model_.verbose == 2
    # Check that the custom object was taken into account
    assert model_.learning_rate == custom_objects["learning_rate"]
    # Check that only parameters that are here already are replaced
    assert not hasattr(model_, "dummy")

    model = model_class.load(str(tmp_path / "test_save.zip"), env=env)

    # check if params are still the same after load
    new_params = model.policy.state_dict()

    # Check that all params are the same as before the save/load procedure
    for key in params:
        assert th.allclose(params[key], new_params[key]), "Model parameters not the same after save and load."

    # check if model still selects the same actions
    new_selected_actions, _ = model.predict(observations, deterministic=True)
    assert np.allclose(selected_actions, new_selected_actions, 1e-4)

    # check if learn still works
    model.learn(total_timesteps=150)

    # Test that the change of parameters works
    model = model_class.load(str(tmp_path / "test_save.zip"), env=env, verbose=3, learning_rate=2.0)
    assert model.learning_rate == 2.0
    assert model.verbose == 3

    # clear file from os
    os.remove(tmp_path / "test_save.zip")


@pytest.mark.parametrize("online_sampling", [False, True])
@pytest.mark.parametrize("truncate_last_trajectory", [False, True])
def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory):
    """
    Test that 'save_replay_buffer' and 'load_replay_buffer' work correctly.
    """
    # remove gym warnings
    warnings.filterwarnings(action="ignore", category=DeprecationWarning)
    warnings.filterwarnings(action="ignore", category=UserWarning, module="gym")

    path = pathlib.Path(tmp_path / "replay_buffer.pkl")
    path.parent.mkdir(exist_ok=True, parents=True)  # to not raise a warning
    env = BitFlippingEnv(n_bits=4, continuous=True)
    model = SAC(
        "MultiInputPolicy",
        env,
        replay_buffer_class=HerReplayBuffer,
        replay_buffer_kwargs=dict(
            n_sampled_goal=2,
            goal_selection_strategy="future",
            online_sampling=online_sampling,
            max_episode_length=4,
        ),
        gradient_steps=1,
        train_freq=4,
        buffer_size=int(2e4),
        policy_kwargs=dict(net_arch=[64]),
        seed=1,
    )
    model.learn(200)
    if online_sampling:
        old_replay_buffer = deepcopy(model.replay_buffer)
    else:
        old_replay_buffer = deepcopy(model.replay_buffer.replay_buffer)
    model.save_replay_buffer(path)
    del model.replay_buffer

    with pytest.raises(AttributeError):
        model.replay_buffer

    # Check that there is no warning
    assert len(recwarn) == 0

    model.load_replay_buffer(path, truncate_last_traj=truncate_last_trajectory)

    if truncate_last_trajectory:
        assert len(recwarn) == 1
        warning = recwarn.pop(UserWarning)
        assert "The last trajectory in the replay buffer will be truncated" in str(warning.message)
    else:
        assert len(recwarn) == 0

    if online_sampling:
        n_episodes_stored = model.replay_buffer.n_episodes_stored
        assert np.allclose(
            old_replay_buffer._buffer["observation"][:n_episodes_stored],
            model.replay_buffer._buffer["observation"][:n_episodes_stored],
        )
        assert np.allclose(
            old_replay_buffer._buffer["next_obs"][:n_episodes_stored],
            model.replay_buffer._buffer["next_obs"][:n_episodes_stored],
        )
        assert np.allclose(
            old_replay_buffer._buffer["action"][:n_episodes_stored],
            model.replay_buffer._buffer["action"][:n_episodes_stored],
        )
        assert np.allclose(
            old_replay_buffer._buffer["reward"][:n_episodes_stored],
            model.replay_buffer._buffer["reward"][:n_episodes_stored],
        )
        # we might change the last done of the last trajectory, so we don't compare it
        assert np.allclose(
            old_replay_buffer._buffer["done"][: n_episodes_stored - 1],
            model.replay_buffer._buffer["done"][: n_episodes_stored - 1],
        )
    else:
        replay_buffer = model.replay_buffer.replay_buffer
        assert np.allclose(old_replay_buffer.observations["observation"], replay_buffer.observations["observation"])
        assert np.allclose(old_replay_buffer.observations["desired_goal"], replay_buffer.observations["desired_goal"])
        assert np.allclose(old_replay_buffer.actions, replay_buffer.actions)
        assert np.allclose(old_replay_buffer.rewards, replay_buffer.rewards)
        assert np.allclose(old_replay_buffer.dones, replay_buffer.dones)

    # test if continuing training works properly
    reset_num_timesteps = truncate_last_trajectory
    model.learn(200, reset_num_timesteps=reset_num_timesteps)
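

# For context: with online sampling the buffer stores whole episodes, and the last
# episode may still be in progress when the buffer is saved. ``truncate_last_traj=True``
# marks that partial episode as finished on load (emitting the warning checked above)
# so HER can safely sample from it before training resumes.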


def test_full_replay_buffer():
    """
    Test if HER works correctly with a full replay buffer when using online sampling.
    It should not sample the current episode, which is not yet finished.
    """
    n_bits = 4
    env = BitFlippingEnv(n_bits=n_bits, continuous=True)

    # use small buffer size to get the buffer full
    model = SAC(
        "MultiInputPolicy",
        env,
        replay_buffer_class=HerReplayBuffer,
        replay_buffer_kwargs=dict(
            n_sampled_goal=2,
            goal_selection_strategy="future",
            online_sampling=True,
            max_episode_length=n_bits,
        ),
        gradient_steps=1,
        train_freq=4,
        policy_kwargs=dict(net_arch=[64]),
        learning_starts=1,
        buffer_size=20,
        verbose=1,
        seed=757,
    )

    model.learn(total_timesteps=100)


def test_get_max_episode_length():
    dict_env = DummyVecEnv([lambda: BitFlippingEnv()])

    # Cannot infer max episode length
    with pytest.raises(ValueError):
        get_time_limit(dict_env, current_max_episode_length=None)

    default_length = 10
    assert get_time_limit(dict_env, current_max_episode_length=default_length) == default_length

    env = gym.make("CartPole-v1")
    vec_env = DummyVecEnv([lambda: env])

    assert get_time_limit(vec_env, current_max_episode_length=None) == 500
    # Overwrite max_episode_steps
    assert get_time_limit(vec_env, current_max_episode_length=default_length) == default_length

    # Set max_episode_steps to None
    env.spec.max_episode_steps = None
    vec_env = DummyVecEnv([lambda: env])
    with pytest.raises(ValueError):
        get_time_limit(vec_env, current_max_episode_length=None)

    # Initialize HER and specify max_episode_length, should not raise an issue
    DQN("MultiInputPolicy", dict_env, replay_buffer_class=HerReplayBuffer, replay_buffer_kwargs=dict(max_episode_length=5))

    with pytest.raises(ValueError):
        DQN("MultiInputPolicy", dict_env, replay_buffer_class=HerReplayBuffer)

    # Wrapped in a TimeLimit, should be fine
    # Note: it requires env.spec to be defined
    env = DummyVecEnv([lambda: gym.wrappers.TimeLimit(BitFlippingEnv(), 10)])
    DQN("MultiInputPolicy", env, replay_buffer_class=HerReplayBuffer, replay_buffer_kwargs=dict(max_episode_length=5))


@pytest.mark.parametrize("online_sampling", [False, True])
@pytest.mark.parametrize("n_bits", [10])
def test_performance_her(online_sampling, n_bits):
    """
    Test that DQN + HER can solve BitFlippingEnv.
    It should not work when n_sampled_goal=0 (DQN alone).
    """
    env = BitFlippingEnv(n_bits=n_bits, continuous=False)

    model = DQN(
        "MultiInputPolicy",
        env,
        replay_buffer_class=HerReplayBuffer,
        replay_buffer_kwargs=dict(
            n_sampled_goal=5,
            goal_selection_strategy="future",
            online_sampling=online_sampling,
            max_episode_length=n_bits,
        ),
        verbose=1,
        learning_rate=5e-4,
        train_freq=1,
        learning_starts=100,
        exploration_final_eps=0.02,
        target_update_interval=500,
        seed=0,
        batch_size=32,
        buffer_size=int(1e5),
    )

    model.learn(total_timesteps=5000, log_interval=50)

    # 90% training success
    assert np.mean(model.ep_success_buffer) > 0.90