""" Generative Adversarial Networks (GAN).
Using generative adversarial networks (GAN) to generate digit images from a
noise distribution.
References:
    - Generative Adversarial Nets. I. Goodfellow, J. Pouget-Abadie, M. Mirza,
      B. Xu, D. Warde-Farley, S. Ozair, Y. Bengio. Advances in Neural Information
      Processing Systems, 2672-2680.
    - Understanding the difficulty of training deep feedforward neural networks.
      X. Glorot, Y. Bengio. AISTATS 9, 249-256.
Links:
- [GAN Paper](https://arxiv.org/pdf/1406.2661.pdf).
- [MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
- [Xavier Glorot Init](www.cs.cmu.edu/~bhiksha/courses/deeplearning/Fall.../AISTATS2010_Glorot.pdf).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
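# Note: with the default settings above, read_data_sets returns each image as a
# float32 vector of length 784 with pixel values scaled to [0, 1], which matches
# the sigmoid output range of the generator defined below.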
# Training Params
num_steps = 100000
batch_size = 128
learning_rate = 0.0002
# Network Params
image_dim = 784 # 28*28 pixels
gen_hidden_dim = 256
disc_hidden_dim = 256
noise_dim = 100 # Noise data points
# A custom initialization (see Xavier Glorot init)
def glorot_init(shape):
    return tf.random_normal(shape=shape, stddev=1. / tf.sqrt(shape[0] / 2.))
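# Note on the formula above: stddev = 1/sqrt(fan_in/2) = sqrt(2/fan_in), i.e. the
# scale is keyed to the input dimension only. The Glorot & Bengio paper uses
# Var(W) = 2/(fan_in + fan_out); sqrt(2/fan_in) is the He-style variant often
# paired with ReLU, and either keeps activations at a reasonable scale here.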
# Store each layer's weights & biases
weights = {
    'gen_hidden1': tf.Variable(glorot_init([noise_dim, gen_hidden_dim])),
    'gen_out': tf.Variable(glorot_init([gen_hidden_dim, image_dim])),
    'disc_hidden1': tf.Variable(glorot_init([image_dim, disc_hidden_dim])),
    'disc_out': tf.Variable(glorot_init([disc_hidden_dim, 1])),
}
biases = {
    'gen_hidden1': tf.Variable(tf.zeros([gen_hidden_dim])),
    'gen_out': tf.Variable(tf.zeros([image_dim])),
    'disc_hidden1': tf.Variable(tf.zeros([disc_hidden_dim])),
    'disc_out': tf.Variable(tf.zeros([1])),
}
# Generator
def generator(x):
    hidden_layer = tf.matmul(x, weights['gen_hidden1'])
    hidden_layer = tf.add(hidden_layer, biases['gen_hidden1'])
    hidden_layer = tf.nn.relu(hidden_layer)
    out_layer = tf.matmul(hidden_layer, weights['gen_out'])
    out_layer = tf.add(out_layer, biases['gen_out'])
    out_layer = tf.nn.sigmoid(out_layer)
    return out_layer
# Discriminator
def discriminator(x):
    hidden_layer = tf.matmul(x, weights['disc_hidden1'])
    hidden_layer = tf.add(hidden_layer, biases['disc_hidden1'])
    hidden_layer = tf.nn.relu(hidden_layer)
    out_layer = tf.matmul(hidden_layer, weights['disc_out'])
    out_layer = tf.add(out_layer, biases['disc_out'])
    out_layer = tf.nn.sigmoid(out_layer)
    return out_layer
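# Both networks are single-hidden-layer MLPs: the generator maps
# [batch, noise_dim] -> [batch, image_dim] with a sigmoid so outputs lie in
# [0, 1] like MNIST pixels, and the discriminator maps [batch, image_dim] ->
# [batch, 1], a probability that its input is a real image.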
# Build Networks
# Network Inputs
gen_input = tf.placeholder(tf.float32, shape=[None, noise_dim], name='input_noise')
disc_input = tf.placeholder(tf.float32, shape=[None, image_dim], name='disc_input')
# Build Generator Network
gen_sample = generator(gen_input)
# Build 2 Discriminator Networks (one fed real images, one fed generated samples)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample)
# Build Loss
gen_loss = -tf.reduce_mean(tf.log(disc_fake))
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
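# The discriminator loss is the negated minimax objective from the paper,
# -E[log D(x)] - E[log(1 - D(G(z)))], and the generator uses the non-saturating
# heuristic -E[log D(G(z))] instead of minimizing E[log(1 - D(G(z)))], which
# gives stronger gradients early in training. A possible tweak (not part of the
# original script) is to clip the sigmoid outputs before the log to avoid
# log(0), e.g.:
#   disc_fake_c = tf.clip_by_value(disc_fake, 1e-8, 1. - 1e-8)
#   gen_loss = -tf.reduce_mean(tf.log(disc_fake_c))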
# Build Optimizers
optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate)
optimizer_disc = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Training Variables for each optimizer
# By default in TensorFlow, every optimizer updates all trainable variables, so
# we need to specify, for each optimizer, the exact set of variables to update
# (an alternative using variable scopes is noted after the lists below).
# Generator Network Variables
gen_vars = [weights['gen_hidden1'], weights['gen_out'],
            biases['gen_hidden1'], biases['gen_out']]
# Discriminator Network Variables
disc_vars = [weights['disc_hidden1'], weights['disc_out'],
             biases['disc_hidden1'], biases['disc_out']]
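# An alternative (not used here, since the variables live in plain dicts) is to
# build each network under its own tf.variable_scope and collect its variables
# with, e.g. (scope name 'Generator' is hypothetical):
#   gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')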
# Create training operations
train_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars)
train_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars)
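# Note: Algorithm 1 in the GAN paper alternates k discriminator updates per
# generator update, whereas the loop below runs both train ops in a single
# sess.run call. A sketch of an alternating schedule (hypothetical, not part of
# this script) would be:
#   for _ in range(k):
#       sess.run(train_disc, feed_dict={disc_input: batch_x, gen_input: z})
#   sess.run(train_gen, feed_dict={gen_input: z})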
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:

    # Run the initializer
    sess.run(init)

    for i in range(1, num_steps+1):
        # Prepare Data
        # Get the next batch of MNIST data (only images are needed, not labels)
        batch_x, _ = mnist.train.next_batch(batch_size)
        # Generate noise to feed to the generator
        z = np.random.uniform(-1., 1., size=[batch_size, noise_dim])

        # Train
        feed_dict = {disc_input: batch_x, gen_input: z}
        _, _, gl, dl = sess.run([train_gen, train_disc, gen_loss, disc_loss],
                                feed_dict=feed_dict)
        if i % 1000 == 0 or i == 1:
            print('Step %i: Generator Loss: %f, Discriminator Loss: %f' % (i, gl, dl))

    # Generate images from noise, using the generator network.
    f, a = plt.subplots(4, 10, figsize=(10, 4))
    for i in range(10):
        # Noise input.
        z = np.random.uniform(-1., 1., size=[4, noise_dim])
        g = sess.run([gen_sample], feed_dict={gen_input: z})
        g = np.reshape(g, newshape=(4, 28, 28, 1))
        # Reverse colours for better display
        g = -1 * (g - 1)
        for j in range(4):
            # Generate image from noise. Extend to 3 channels for matplotlib figure.
            img = np.reshape(np.repeat(g[j][:, :, np.newaxis], 3, axis=2),
                             newshape=(28, 28, 3))
            a[j][i].imshow(img)

    f.show()
    plt.draw()
    plt.waitforbuttonpress()