-
Notifications
You must be signed in to change notification settings - Fork 4
/
DPED.py
341 lines (294 loc) · 18.7 KB
/
DPED.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
from __future__ import division
import os
import time
import tensorflow as tf
import scipy.misc
import scipy.io
import numpy as np
from glob import glob
from utils import *
from ops import *
from vgg19 import *
class DPED(object):
    """DPED photo-enhancement model (TF1 graph-mode implementation).

    Builds a residual-CNN generator that maps phone photos toward DSLR
    quality, and a convolutional discriminator used for the texture (GAN)
    loss.  The generator is trained with a weighted sum of four losses:
    color (blurred MSE), texture (adversarial), content (VGG feature MSE)
    and total variation.

    The whole graph is constructed eagerly in ``__init__`` inside the
    supplied session; training/testing methods then only run ``sess.run``.
    """

    def __init__(self, sess, config, dataset_phone, dataset_dslr):
        """Construct the full training and testing graph.

        Args:
            sess: active ``tf.Session`` that owns the graph and variables.
            config: hyper-parameter namespace (batch_size, patch_size,
                learning_rate, loss weights, dataset paths, ...).
            dataset_phone: phone-side training data, consumed by the
                project-level ``get_batch`` helper.
            dataset_dslr: paired DSLR-side training data.
        """
        # copy training parameters
        self.sess = sess
        self.config = config
        self.batch_size = config.batch_size
        self.patch_size = config.patch_size
        self.mode = config.mode
        self.channels = config.channels
        self.augmentation = config.augmentation
        self.checkpoint_dir = config.checkpoint_dir
        self.content_layer = config.content_layer
        self.vgg_dir = config.vgg_dir
        self.dataset_name = config.dataset_name
        self.dataset_phone = dataset_phone
        self.dataset_dslr = dataset_dslr
        # loss weights
        self.w_content = config.w_content
        self.w_texture = config.w_texture
        self.w_color = config.w_color
        self.w_tv = config.w_tv
        # patches for training (fixed size)
        self.phone_patch = tf.placeholder(tf.float32, [self.batch_size, self.patch_size, self.patch_size, self.channels], name='input_phone_patch')
        self.dslr_patch = tf.placeholder(tf.float32, [self.batch_size, self.patch_size, self.patch_size, self.channels], name='input_dslr_patch')
        # images for testing (unknown size)
        self.phone_test = tf.placeholder(tf.float32, [None, self.patch_size, self.patch_size, self.channels], name='input_phone_test')
        self.phone_test_unknown = tf.placeholder(tf.float32, [None, None, None, self.channels], name='input_phone_test_unknown_size')
        self.dslr_test = tf.placeholder(tf.float32, [None, self.patch_size, self.patch_size, self.channels], name='input_dslr_test')
        # input to discriminator network
        self.input_discriminator = tf.placeholder(tf.float32, [self.batch_size, self.patch_size, self.patch_size, self.channels], name='input_discriminator')
        # build models; the discriminator must exist before the generator
        # loss, because the texture loss reads its logits.
        self.build_generator()
        self.build_discriminator()
        # build loss function (color + texture + content + TV)
        self.build_generator_loss()
        tf.global_variables_initializer().run(session=self.sess)
        #self.build_log_summary()
        self.saver = tf.train.Saver(tf.trainable_variables())

    def build_generator(self):
        """Instantiate the generator on all three input placeholders.

        AUTO_REUSE makes the three calls share one set of weights; the
        trainable variables under the 'generator' scope are collected into
        ``self.g_var`` for the generator optimizer.
        """
        self.enhanced_patch = self.generator_network(self.phone_patch)
        self.enhanced_test = self.generator_network(self.phone_test)
        self.enhanced_test_unknown = self.generator_network(self.phone_test_unknown)
        variables = tf.trainable_variables()
        self.g_var = [x for x in variables if 'generator' in x.name]
        print("Completed building generator. Number of variables:", len(self.g_var))
        #print(self.g_var)

    def generator_network(self, image):
        """Residual-CNN generator: 9x9 conv, 4 residual blocks, 3x3 convs.

        Args:
            image: float32 tensor, NHWC; fully convolutional, so the
                spatial size may be unknown.
        Returns:
            Enhanced image tensor with ``self.channels`` output channels.
        """
        with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):
            # conv. layer before residual blocks
            b1_in = tf.layers.conv2d(image, 64, 9, strides=1, padding='SAME', name='CONV_1', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            b1_in = tf.nn.relu(b1_in)
            # residual blocks
            b1_out = self.resblock(b1_in, 1)
            b2_out = self.resblock(b1_out, 2)
            b3_out = self.resblock(b2_out, 3)
            b4_out = self.resblock(b3_out, 4)
            # conv. layers after residual blocks
            temp = tf.layers.conv2d(b4_out, 64, 3, strides=1, padding='SAME', name='CONV_2', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.nn.relu(temp)
            temp = tf.layers.conv2d(temp, 64, 3, strides=1, padding='SAME', name='CONV_3', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.nn.relu(temp)
            temp = tf.layers.conv2d(temp, 64, 3, strides=1, padding='SAME', name='CONV_4', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.nn.relu(temp)
            # final 1x1 conv projects back to 3 channels, no activation
            temp = tf.layers.conv2d(temp, 3, 1, strides=1, padding='SAME', name='CONV_5', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            return temp

    def resblock(self, feature_in, num):
        """One residual block: two 3x3 convs (BN disabled) + skip connection.

        Args:
            feature_in: input feature map (64 channels).
            num: block index, used only to namespace the variables.
        """
        # subblock (conv. + BN + relu)
        temp = tf.layers.conv2d(feature_in, 64, 3, strides=1, padding='SAME', name=('resblock_%d_CONV_1' % num), kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
        #temp = tf.layers.batch_normalization(temp, name = ('resblock_%d_BN_1' %num))
        temp = tf.nn.relu(temp)
        # subblock (conv. + BN + relu)
        temp = tf.layers.conv2d(temp, 64, 3, strides=1, padding='SAME', name=('resblock_%d_CONV_2' % num), kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
        #temp = tf.layers.batch_normalization(temp, name = ('resblock_%d_BN_2' %num))
        temp = tf.nn.relu(temp)
        return temp + feature_in

    def build_generator_loss(self):
        """Assemble the 4-term generator loss and its Adam optimizer.

        Requires ``build_discriminator`` to have run (reads the
        discriminator logits for the texture loss).
        """
        # color loss (blur + mse) - since output values are normalized, color loss should be multiplied by 255
        self.color_loss = 255 * tf.reduce_mean(tf.square(gaussian_blur(self.dslr_patch) - gaussian_blur(self.enhanced_patch)))
        # texture loss (gan)
        self.texture_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.logits_dslr, self.logits_enhanced))
        # content loss (vgg) - VGG expects [0, 255] inputs, hence the * 255
        enhanced_vgg = net(self.vgg_dir, self.enhanced_patch * 255)
        dslr_vgg = net(self.vgg_dir, self.dslr_patch * 255)
        self.content_loss = tf.reduce_mean(tf.square(enhanced_vgg[self.content_layer] - dslr_vgg[self.content_layer]))
        # tv loss (tv)
        self.tv_loss = tf.reduce_mean(tf.image.total_variation(self.enhanced_patch))
        # calculate generator loss as a weighted sum of the above 4 losses
        self.G_loss = self.color_loss * self.w_color + self.texture_loss * self.w_texture + self.content_loss * self.w_content + self.tv_loss * self.w_tv
        self.G_optimizer = tf.train.AdamOptimizer(self.config.learning_rate).minimize(self.G_loss, var_list=self.g_var)

    def build_discriminator(self):
        """Instantiate the discriminator and its loss/optimizer.

        The discriminator is applied to phone, DSLR and enhanced patches
        (weights shared via AUTO_REUSE); it is trained to call DSLR patches
        real (ones) and phone patches fake (zeros).
        """
        self.logits_phone, _ = self.discriminator_network(self.phone_patch)
        self.logits_dslr, _ = self.discriminator_network(self.dslr_patch)
        self.logits_enhanced, _ = self.discriminator_network(self.enhanced_patch)
        _, self.prob = self.discriminator_network(self.phone_test)
        variables = tf.trainable_variables()
        self.d_var = [x for x in variables if 'discriminator' in x.name]
        print("Completed building discriminator. Number of variables:", len(self.d_var))
        d_loss_real = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.logits_dslr, tf.ones_like(self.logits_dslr)))
        d_loss_fake = tf.reduce_mean(sigmoid_cross_entropy_with_logits(self.logits_phone, tf.zeros_like(self.logits_phone)))
        self.d_loss = d_loss_real + d_loss_fake
        # NOTE(review): the batch-norm layers below are built without a
        # training flag and their update ops are never run with the
        # optimizer - confirm this is intentional before relying on BN stats.
        self.D_optimizer = tf.train.AdamOptimizer(self.config.learning_rate).minimize(self.d_loss, var_list=self.d_var)

    def discriminator_network(self, image):
        """5-conv + 2-FC discriminator operating on grayscale input.

        Args:
            image: float32 RGB tensor, NHWC.
        Returns:
            (logits, probability) - raw score and its sigmoid.
        """
        with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):
            # convert to grayscale image
            image_gray = tf.image.rgb_to_grayscale(image)
            # conv layer 1
            temp = tf.layers.conv2d(image_gray, 48, 11, strides=4, padding='SAME', name='CONV_1', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = lrelu(temp)
            # conv layer 2
            temp = tf.layers.conv2d(temp, 128, 5, strides=2, padding='SAME', name='CONV_2', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.layers.batch_normalization(temp, name='BN_2')
            temp = lrelu(temp)
            # conv layer 3
            temp = tf.layers.conv2d(temp, 192, 3, strides=1, padding='SAME', name='CONV_3', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.layers.batch_normalization(temp, name='BN_3')
            temp = lrelu(temp)
            # conv layer 4
            temp = tf.layers.conv2d(temp, 192, 3, strides=1, padding='SAME', name='CONV_4', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.layers.batch_normalization(temp, name='BN_4')
            temp = lrelu(temp)
            # conv layer 5
            temp = tf.layers.conv2d(temp, 128, 3, strides=2, padding='SAME', name='CONV_5', kernel_initializer=tf.contrib.layers.xavier_initializer(), reuse=tf.AUTO_REUSE)
            temp = tf.layers.batch_normalization(temp, name='BN_5')
            temp = lrelu(temp)
            # FC layer 1
            # NOTE(review): these dense layers carry no explicit name; TF1
            # auto-names them, so weight sharing across the four
            # discriminator_network calls relies on AUTO_REUSE picking up the
            # same auto-generated scopes - verify the dense weights are
            # actually shared (left unnamed to keep checkpoint compatibility).
            fc_in = tf.contrib.layers.flatten(temp)
            fc_out = tf.layers.dense(fc_in, units=1024, activation=None)
            fc_out = lrelu(fc_out)
            # FC layer 2
            logits = tf.layers.dense(fc_out, units=1, activation=None)
            probability = tf.nn.sigmoid(logits)
            return logits, probability

    def pretrain_discriminator(self, load=True):
        """Pre-train the discriminator alone for 10k iterations.

        Args:
            load: when True, try to resume from the latest checkpoint first.
        Side effects: prints progress every 2000 iterations, runs a small
        accuracy test, and saves a checkpoint at the end.
        """
        if load:
            if self.load():
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")
        else:
            print(" Discriminator training starts from beginning")
        start = time.time()
        for i in range(0, 10000):
            phone_batch, dslr_batch = get_batch(self.dataset_phone, self.dataset_dslr, self.config)
            _ = self.sess.run(self.D_optimizer, feed_dict={self.phone_patch: phone_batch, self.dslr_patch: dslr_batch})
            if i % 2000 == 0:
                phone_batch, dslr_batch = get_batch(self.dataset_phone, self.dataset_dslr, self.config)
                d_loss = self.sess.run(self.d_loss, feed_dict={self.phone_patch: phone_batch, self.dslr_patch: dslr_batch})
                print("Iteration %d, runtime: %.3f s, discriminator loss: %.6f" % (i, time.time() - start, d_loss))
                self.test_discriminator(200)
        print("pretraining complete")
        self.save()

    def train(self, load=True):
        """Adversarial training loop (100k iterations).

        Each iteration updates the generator on a fresh batch, then updates
        the discriminator using the just-enhanced batch as the fake input.

        Args:
            load: when True, try to resume from the latest checkpoint first.
        """
        if load:
            if self.load():
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")
        else:
            print(" Overall training starts from beginning")
        start = time.time()
        for i in range(0, 100000):
            phone_batch, dslr_batch = get_batch(self.dataset_phone, self.dataset_dslr, self.config)
            _, enhanced_batch = self.sess.run([self.G_optimizer, self.enhanced_patch], feed_dict={self.phone_patch: phone_batch, self.dslr_patch: dslr_batch})
            # feed the generator output through the phone_patch placeholder
            # so the discriminator learns enhanced-vs-DSLR
            _ = self.sess.run(self.D_optimizer, feed_dict={self.phone_patch: enhanced_batch, self.dslr_patch: dslr_batch})
            if i % 1000 == 0:
                phone_batch, dslr_batch = get_batch(self.dataset_phone, self.dataset_dslr, self.config)
                #g_loss = self.sess.run(self.G_loss , feed_dict={self.phone_patch:phone_batch, self.dslr_patch:dslr_batch})
                g_loss, color_loss, texture_loss, content_loss, tv_loss = self.sess.run([self.G_loss, self.color_loss, self.texture_loss, self.content_loss, self.tv_loss], feed_dict={self.phone_patch: phone_batch, self.dslr_patch: dslr_batch})
                print("Iteration %d, runtime: %.3f s, generator loss: %.6f" % (i, time.time() - start, g_loss))
                print("Loss per component: color %.6f, texture %.6f, content %.6f, tv %.6f" % (color_loss, texture_loss, content_loss, tv_loss))
                # during training, test for only patches (full image testing incurs memory issues...)
                self.test_generator(200, 0)
                self.save()

    def test_discriminator(self, test_num, load=False, mode="phone_dslr"):
        """Measure discriminator accuracy on random test patches.

        Args:
            test_num: number of random phone/DSLR patch pairs to score.
            load: when True, restore the latest checkpoint first.
            mode: "enhanced" additionally scores generator output and
                reports whether it fools the discriminator.
        """
        if load:
            if self.load():
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")
        #print("testing discriminator")
        test_list_dslr = sorted(glob(self.config.test_path_dslr_patch))
        test_list_phone = sorted(glob(self.config.test_path_phone_patch))
        #print("total testset: %d image pairs" %len(test_list_dslr))
        # test dlsr
        acc_dslr = 0
        acc_phone = 0
        acc_enhanced = 0
        indexes = []
        probs = np.zeros([test_num])
        for i in range(test_num):
            index = np.random.randint(len(test_list_dslr))
            indexes.append(index)
            test_patch_dslr = preprocess(scipy.misc.imread(test_list_dslr[index], mode="RGB").astype("float32"))
            prob = self.sess.run(self.prob, feed_dict={self.phone_test: [test_patch_dslr]})
            # DSLR patches count as correct when classified "real" (>0.5)
            if prob > 0.5:
                probs[i] = prob
                acc_dslr += 1
            test_patch_phone = preprocess(scipy.misc.imread(test_list_phone[index], mode="RGB").astype("float32"))
            prob = self.sess.run(self.prob, feed_dict={self.phone_test: [test_patch_phone]})
            # phone patches count as correct when classified "fake" (<0.5)
            if prob < 0.5:
                acc_phone += 1
            if mode == "enhanced":
                test_patch_enhanced = self.sess.run(self.enhanced_test, feed_dict={self.phone_test: [test_patch_phone], self.dslr_test: [test_patch_dslr]})
                prob = self.sess.run(self.prob, feed_dict={self.phone_test: [test_patch_enhanced[0]]})
                if prob < 0.5:
                    acc_enhanced += 1
        if mode == "enhanced":
            print("Discriminator test accuracy: phone: %d/%d, dslr: %d/%d, enhanced: %d/%d" % (acc_phone, test_num, acc_dslr, test_num, acc_enhanced, test_num))
        else:
            print("Discriminator test accuracy: phone: %d/%d, dslr: %d/%d" % (acc_phone, test_num, acc_dslr, test_num))

    def test_generator(self, test_num_patch=200, test_num_image=5, load=False):
        """Evaluate the generator: PSNR on patches and on full images.

        Args:
            test_num_patch: number of random test patches to score.
            test_num_image: number of full-size test images to enhance
                (0 skips the full-image pass).
            load: when True, restore the latest checkpoint first.
        Side effects: writes sample images under ./samples/<dataset>/.
        NOTE(review): `imageio` is used below but not imported in this
        file - presumably it arrives via `from utils import *`; confirm.
        """
        if load:
            if self.load():
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")
        # test for patches
        start = time.time()
        self.test_discriminator(200, load=False, mode="enhanced")
        test_list_phone = sorted(glob(self.config.test_path_phone_patch))
        test_list_dslr = sorted(glob(self.config.test_path_dslr_patch))
        PSNR_phone_enhanced_list = np.zeros([test_num_patch])
        PSNR_dslr_enhanced_list = np.zeros([test_num_patch])
        indexes = []
        for i in range(test_num_patch):
            index = np.random.randint(len(test_list_dslr))
            indexes.append(index)
            test_patch_phone = preprocess(scipy.misc.imread(test_list_phone[index], mode="RGB").astype("float32"))
            test_patch_dslr = preprocess(scipy.misc.imread(test_list_dslr[index], mode="RGB").astype("float32"))
            test_patch_enhanced = self.sess.run(self.enhanced_test, feed_dict={self.phone_test: [test_patch_phone], self.dslr_test: [test_patch_dslr]})
            if i % 50 == 0:
                imageio.imwrite(("./samples/%s/patch/phone_%d.png" % (self.config.dataset_name, i)), postprocess(test_patch_phone))
                imageio.imwrite(("./samples/%s/patch/dslr_%d.png" % (self.config.dataset_name, i)), postprocess(test_patch_dslr))
                imageio.imwrite(("./samples/%s/patch/enhanced_%d.png" % (self.config.dataset_name, i)), postprocess(test_patch_enhanced[0]))
            #print(enhanced_test_patch.shape)
            PSNR = calc_PSNR(postprocess(test_patch_enhanced[0]), postprocess(test_patch_phone))
            #print("PSNR: %.3f" %PSNR)
            PSNR_phone_enhanced_list[i] = PSNR
            PSNR = calc_PSNR(postprocess(test_patch_enhanced[0]), postprocess(test_patch_dslr))
            #print("PSNR: %.3f" %PSNR)
            PSNR_dslr_enhanced_list[i] = PSNR
        print("(runtime: %.3f s) Average test PSNR for %d random test image patches: phone-enhanced %.3f, dslr-enhanced %.3f" % (time.time() - start, test_num_patch, np.mean(PSNR_phone_enhanced_list), np.mean(PSNR_dslr_enhanced_list)))
        # test for images
        start = time.time()
        test_list_phone = sorted(glob(self.config.test_path_phone_image))
        PSNR_phone_enhanced_list = np.zeros([test_num_image])
        PSNR_dslr_enhanced_list = np.zeros([test_num_image])
        indexes = []
        for i in range(test_num_image):
            #index = np.random.randint(len(test_list_phone))
            index = i
            indexes.append(index)
            test_image_phone = preprocess(scipy.misc.imread(test_list_phone[index], mode="RGB").astype("float32"))
            # full images go through the unknown-size placeholder
            test_image_enhanced = self.sess.run(self.enhanced_test_unknown, feed_dict={self.phone_test_unknown: [test_image_phone]})
            imageio.imwrite(("./samples/%s/image/phone_%d.png" % (self.config.dataset_name, i)), postprocess(test_image_phone))
            imageio.imwrite(("./samples/%s/image/enhanced_%d.png" % (self.config.dataset_name, i)), postprocess(test_image_enhanced[0]))
            PSNR = calc_PSNR(postprocess(test_image_enhanced[0]), postprocess(test_image_phone))
            #print("PSNR: %.3f" %PSNR)
            PSNR_phone_enhanced_list[i] = PSNR
        if test_num_image > 0:
            print("(runtime: %.3f s) Average test PSNR for %d random full test images: phone-enhanced %.3f" % (time.time() - start, test_num_image, np.mean(PSNR_phone_enhanced_list)))

    def build_log_summary(self):
        """Set up TensorBoard summaries (currently not called from __init__)."""
        self.output_summary = tf.summary.image("output", self.enhanced_test)
        self.loss_summary = tf.summary.scalar("loss", self.G_loss)
        self.summary_merged = tf.summary.merge([self.output_summary, self.loss_summary])
        self.writer = tf.summary.FileWriter("./logs", self.sess.graph)

    def save(self):
        """Save trainable variables to <checkpoint_dir>/<dataset_name>/DPED."""
        model_name = "DPED"
        checkpoint_dir = os.path.join(self.checkpoint_dir, self.dataset_name)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), write_meta_graph=False)

    def load(self):
        """Restore the latest checkpoint for this dataset.

        Returns:
            True on success, False when no checkpoint exists.
        """
        checkpoint_dir = os.path.join(self.checkpoint_dir, self.dataset_name)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        print("Loading checkpoints from ", checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # restore the checkpoint actually found, rather than assuming a
            # hard-coded model name
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            return True
        else:
            return False