train.py
# Pixel-wise A3C training script for 2D image registration on MNIST image pairs.
import numpy as np
import cv2
import chainer
from chainer import serializers
from MyFCN import *
from chainer import cuda, optimizers, Variable
import sys
import math
import time
import chainerrl
import State
import os
from pixelwise_a3c import *
from loader import Loader
from test import *
from chainer.datasets import mnist
#_/_/_/ paths _/_/_/
DATA_PATH = "/mnt/hd1/puwenbo/Dataset/registration2D_dataset/new"
SAVE_PATH = "./model/pixel-reg"
#_/_/_/ training parameters _/_/_/
TEST_NUM = 50
LEARNING_RATE = 0.001
TRAIN_BATCH_SIZE = 10
N_EPISODES = 3000
EPISODE_LEN = 20
SNAPSHOT_EPISODES = 300
TEST_EPISODES = 300
GAMMA = 0.95 # discount factor
# DATA_SIZE = 64
DATA_SIZE = 28
# noise setting (unused)
# MEAN = 0
# SIGMA = 15
N_ACTIONS = 9
MOVE_RANGE = 3
GPU_ID = 2
def test(loader, agent, fout, episode):
    # Evaluate the agent on the MNIST test pairs and save example warps.
    sum_reward = 0
    # moving, fixed = loader.load_data(-1)
    moving, fixed = loader.load_mnist_test(9)
    current_state = State.State()
    current_state.reset(moving, fixed)
    for t in range(0, EPISODE_LEN):
        previous_image = current_state.warp_image.copy()
        action, inner_state = agent.act(current_state.tensor)
        current_state.step(action, inner_state)
        del action, inner_state
        # reward: decrease in squared error between the warped and fixed images
        reward = (np.square(current_state.fixed_image - previous_image) * 255
                  - np.square(current_state.fixed_image - current_state.warp_image) * 255)
        sum_reward += np.mean(reward) * np.power(GAMMA, t)
        # if reward > 0:
        #     break
    agent.stop_episode()

    # save the warped, moving and fixed images (pixel values assumed to lie in [0, 1])
    if not os.path.exists('./' + str(episode)):
        os.mkdir('./' + str(episode))
    for i in range(TEST_NUM):
        print(np.shape(current_state.warp_image[i][0]))
        print(current_state.warp_image[i][0])
        warp = np.maximum(0, current_state.warp_image[i][0])
        warp = (warp * 255).astype(np.uint8)
        cv2.imwrite('./' + str(episode) + '/' + str(i) + '_output.png', warp)
        m = np.maximum(0, moving[i][0])
        m = (m * 255).astype(np.uint8)
        cv2.imwrite('./' + str(episode) + '/' + str(i) + '_moving.png', m)
        f = np.maximum(0, fixed[i][0])
        f = (f * 255).astype(np.uint8)
        cv2.imwrite('./' + str(episode) + '/' + str(i) + '_fixed.png', f)

    print("test total reward {a}".format(a=sum_reward * 255 / TEST_NUM))
    fout.write("test total reward {a}\n".format(a=sum_reward * 255 / TEST_NUM))
    sys.stdout.flush()
def main(fout):
    # data loader and registration state
    loader = Loader(path=DATA_PATH, test_num=TEST_NUM, batch_size=TRAIN_BATCH_SIZE, data_size=DATA_SIZE)
    # chainer.cuda.get_device_from_id(GPU_ID).use()
    current_state = State.State()

    # fully convolutional policy/value network and its optimizer
    model = MyFcn(N_ACTIONS)
    optimizer = chainer.optimizers.Adam(alpha=LEARNING_RATE)
    optimizer.setup(model)

    # pixel-wise A3C agent with an inner (recurrent) state
    agent = PixelWiseA3C_InnerState(model, optimizer, EPISODE_LEN, GAMMA)
    # agent.model.to_gpu()

    i = 0
    for episode in range(1, N_EPISODES + 1):
        # display current episode
        print("episode %d" % episode)
        fout.write("episode %d\n" % episode)
        sys.stdout.flush()

        # moving, fixed = loader.load_data(i)
        # moving, fixed = loader.load_mnist(9, i)
        # randomly load MNIST pairs from the full dataset
        moving, fixed = loader.load_mnist_full()

        # initialize the current state and reward
        current_state.reset(moving, fixed)
        del moving, fixed
        reward = np.zeros(current_state.warp_image.shape, current_state.warp_image.dtype)
        sum_reward = 0

        for t in range(0, EPISODE_LEN):
            previous_image = current_state.warp_image.copy()
            action, inner_state = agent.act_and_train(current_state.tensor, reward)
            current_state.step(action, inner_state)
            del action, inner_state
            # reward: decrease in squared error between the warped and fixed images
            reward = (np.square(current_state.fixed_image - previous_image) * 255
                      - np.square(current_state.fixed_image - current_state.warp_image) * 255)
            sum_reward += np.mean(reward) * np.power(GAMMA, t)
            if t < 30:
                print(np.mean(reward))
                print(sum_reward)

        agent.stop_episode_and_train(current_state.tensor, reward, True)
        print("train total reward {a}".format(a=sum_reward * 255))
        fout.write("train total reward {a}\n".format(a=sum_reward * 255))
        sys.stdout.flush()

        if episode % TEST_EPISODES == 0:
            test(loader, agent, fout, episode)
        if episode % SNAPSHOT_EPISODES == 0:
            agent.save(SAVE_PATH + str(episode))
        if (i + 1) * TRAIN_BATCH_SIZE > loader.num:
            i = 0

        # polynomial learning-rate decay over the course of training
        optimizer.alpha = LEARNING_RATE * ((1 - episode / N_EPISODES) ** 0.9)

    final_test(loader, agent, fout)
if __name__ == '__main__':
    fout = open('log.txt', "w")
    start = time.time()
    main(fout)
    end = time.time()
    print("{s}[s]".format(s=end - start))
    print("{s}[m]".format(s=(end - start) / 60))
    print("{s}[h]".format(s=(end - start) / 60 / 60))
    fout.write("{s}[s]\n".format(s=end - start))
    fout.write("{s}[m]\n".format(s=(end - start) / 60))
    fout.write("{s}[h]\n".format(s=(end - start) / 60 / 60))
    fout.close()