Replay_Buffer.py
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 28 10:13:32 2020
@author: Administrator
"""
from collections import namedtuple, deque
import random
import torch
import numpy as np


class Replay_Buffer(object):
    """Replay buffer that stores past experiences which the agent can then reuse as training data."""

    def __init__(self, buffer_size, batch_size, seed):
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
        # random.seed() returns None, so seed the module RNG and keep the seed value itself.
        random.seed(seed)
        self.seed = seed
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    def add_experience(self, states, actions, rewards, next_states, dones):
        """Adds one experience, or a list of experiences, to the replay buffer."""
        if isinstance(dones, list):
            assert not isinstance(dones[0], list), "A done shouldn't be a list"
            experiences = [self.experience(state, action, reward, next_state, done)
                           for state, action, reward, next_state, done in
                           zip(states, actions, rewards, next_states, dones)]
            self.memory.extend(experiences)
        else:
            experience = self.experience(states, actions, rewards, next_states, dones)
            self.memory.append(experience)
    def sample(self, num_experiences=None, separate_out_data_types=True):
        """Draws a random sample of experiences from the replay buffer."""
        experiences = self.pick_experiences(num_experiences)
        if separate_out_data_types:
            states, actions, rewards, next_states, dones = self.separate_out_data_types(experiences)
            return states, actions, rewards, next_states, dones
        else:
            return experiences
    def separate_out_data_types(self, experiences):
        """Puts the sampled experiences into the correct format for a PyTorch neural network."""
        states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)
        actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(self.device)
        rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)
        next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)
        dones = torch.from_numpy(np.vstack([int(e.done) for e in experiences if e is not None])).float().to(self.device)
        return states, actions, rewards, next_states, dones
    def pick_experiences(self, num_experiences=None):
        """Uniformly samples experiences without replacement, defaulting to the configured batch size."""
        if num_experiences is not None:
            batch_size = num_experiences
        else:
            batch_size = self.batch_size
        return random.sample(self.memory, k=batch_size)
    def __len__(self):
        return len(self.memory)
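

# A minimal usage sketch, not part of the original file: the buffer size, state
# dimensionality, and action values below are illustrative assumptions, chosen
# only to exercise the single-experience and batched add paths plus sampling.
if __name__ == "__main__":
    buffer = Replay_Buffer(buffer_size=10000, batch_size=4, seed=42)

    # Add a single experience (done passed as a scalar, so it takes the append path).
    buffer.add_experience(np.zeros(4), 0, 1.0, np.ones(4), False)

    # Add a batch of experiences at once (dones passed as a list, so it takes the extend path).
    states = [np.random.rand(4) for _ in range(8)]
    next_states = [np.random.rand(4) for _ in range(8)]
    actions = list(range(8))
    rewards = [float(i) for i in range(8)]
    dones = [False] * 7 + [True]
    buffer.add_experience(states, actions, rewards, next_states, dones)

    # Draw a training batch as PyTorch tensors already moved to the buffer's device.
    s, a, r, ns, d = buffer.sample()
    print(s.shape, a.shape, r.shape, ns.shape, d.shape)
    # Expected: torch.Size([4, 4]) torch.Size([4, 1]) torch.Size([4, 1]) torch.Size([4, 4]) torch.Size([4, 1])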