-
Notifications
You must be signed in to change notification settings - Fork 0
/
mario_agent.py
63 lines (38 loc) · 1.68 KB
/
mario_agent.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
from keras.optimizers import Adam
from keras.layers import *
from keras.models import Sequential
from runner import GymRunner
from qlearning_agent import QLearningAgent
class Agent(QLearningAgent):
    """Deep Q-learning agent for the Mario-v0 environment.

    Stacks consecutive frames, extracts features with a convolutional
    front-end, and feeds them through an LSTM head to score each of the
    13 discrete actions.
    """

    def __init__(self):
        # 13 = size of the discrete action space; the meaning of each
        # action is defined by the environment / QLearningAgent base.
        super().__init__(13)

    def build_model(self):
        """Build and compile the Keras model mapping frame stacks to actions.

        Returns:
            A compiled ``Sequential`` model. Input shape is
            ``(n_frames, width_size, height_size)`` in channels-first
            layout; output is a softmax over ``self.action_size`` actions.
        """
        n_actions = self.action_size
        n_frames = 3  # number of stacked consecutive frames per observation
        # NOTE(review): 224x256 -- confirm this matches the emulator frame
        # size and the (width, height) ordering the runner actually emits.
        width_size = 224
        height_size = 256

        model = Sequential()
        # Convolutional feature extractor. channels_first because the frame
        # stack occupies the leading axis of the input tensor.
        model.add(
            Conv2D(32, (5, 5), strides=(4, 4), activation='relu',
                   padding='same',
                   input_shape=(n_frames, width_size, height_size),
                   data_format='channels_first'))
        model.add(Dropout(0.2))
        model.add(Conv2D(32, (3, 3), strides=(2, 2), activation='relu',
                         padding='same', data_format='channels_first'))
        model.add(Dropout(0.2))
        model.add(Conv2D(32, (3, 3), strides=(2, 2), activation='relu',
                         padding='same', data_format='channels_first'))
        model.add(Dropout(0.3))
        model.add(Conv2D(64, (3, 3), strides=(2, 2), activation='relu',
                         padding='same', data_format='channels_first'))
        model.add(Flatten())
        model.add(BatchNormalization())
        model.add(Dense(256, activation='relu'))
        # Reshape the feature vector into a length-1 sequence so the LSTM
        # layer can consume it.
        model.add(Reshape((1, 256)))
        model.add(LSTM(256))
        model.add(Dense(n_actions, activation='softmax'))

        # Model.summary() prints the summary itself and returns None;
        # wrapping it in print() (as before) also printed a stray "None".
        model.summary()

        model.compile(Adam(lr=0.001), loss='categorical_crossentropy',
                      metrics=['accuracy'])
        # To resume a previous training session, load saved weights:
        # model.load_weights("models/mario-v0.h5")
        return model
if __name__ == '__main__':
    # Entry point: train the agent on Mario-v0, then run it for evaluation.
    runner = GymRunner('Mario-v0')
    mario_agent = Agent()

    # 100000 / 5000 -- presumably step or episode budgets; exact semantics
    # are defined by GymRunner.train / GymRunner.run.
    runner.train(mario_agent, 100000)
    runner.run(mario_agent, 5000)