diff --git a/check_mkl.py b/check_mkl.py
index b89c798..87d34f0 100644
--- a/check_mkl.py
+++ b/check_mkl.py
@@ -1,22 +1,15 @@
-
 # coding: utf-8
 
-# In[5]:
-
-
 from __future__ import division, print_function, absolute_import
 
 import tensorflow as tf
+
 from tensorflow.python.client import timeline
 
 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
 
-
-# In[2]:
-
-
 # Training Parameters
 learning_rate = 0.001
 num_steps = 500
@@ -24,20 +17,18 @@
 display_step = 10
 
 # Network Parameters
-num_input = 784 # MNIST data input (img shape: 28*28)
-num_classes = 10 # MNIST total classes (0-9 digits)
-dropout = 0.75 # Dropout, probability to keep units
+num_input = 784  # MNIST data input (img shape: 28*28)
+num_classes = 10  # MNIST total classes (0-9 digits)
+dropout = 0.75  # Dropout, probability to keep units
 
 # tf Graph input
 X = tf.placeholder(tf.float32, [None, num_input])
 Y = tf.placeholder(tf.float32, [None, num_classes])
-keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
-
+keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
 
-# In[3]:
+# Create some wrappers for simplicity
 
 
-# Create some wrappers for simplicity
 def conv2d(x, W, b, strides=1):
     # Conv2D wrapper, with bias and relu activation
     x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
@@ -50,8 +41,9 @@ def maxpool2d(x, k=2):
     return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                           padding='SAME')
 
-
 # Create model
+
+
 def conv_net(x, weights, biases, dropout):
     # MNIST data input is a 1-D vector of 784 features (28*28 pixels)
     # Reshape to match picture format [Height x Width x Channel]
@@ -80,10 +72,6 @@ def conv_net(x, weights, biases, dropout):
     out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
     return out
 
-
-# In[4]:
-
-
 # Store layers weight & bias
 weights = {
     # 5x5 conv, 1 input, 32 outputs
@@ -91,7 +79,7 @@ def conv_net(x, weights, biases, dropout):
     # 5x5 conv, 32 inputs, 64 outputs
     'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
     # fully connected, 7*7*64 inputs, 1024 outputs
-    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
+    'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),
     # 1024 inputs, 10 outputs (class prediction)
     'out': tf.Variable(tf.random_normal([1024, num_classes]))
 }
@@ -113,7 +101,6 @@ def conv_net(x, weights, biases, dropout):
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
 train_op = optimizer.minimize(loss_op)
 
-
 # Evaluate model
 correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
@@ -121,10 +108,6 @@ def conv_net(x, weights, biases, dropout):
 # Initialize the variables (i.e. assign their default value)
 init = tf.global_variables_initializer()
 
-
-# In[6]:
-
-
 # Start training
 with tf.Session() as sess:
     run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
@@ -133,19 +116,10 @@ def conv_net(x, weights, biases, dropout):
     # Run the initializer
     sess.run(init)
 
-    for step in range(1, num_steps+1):
-        batch_x, batch_y = mnist.train.next_batch(batch_size)
-        # Run optimization op (backprop)
-        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: dropout})
-        if step % display_step == 0 or step == 1:
-            # Calculate batch loss and accuracy
-            loss, acc = sess.run([loss_op, accuracy],
-                                 feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.0},
-                                 options=run_options,
-                                 run_metadata=run_metadata)
-            tl = timeline.Timeline(run_metadata.step_stats)
-            ctf = tl.generate_chrome_trace_format()
-            with open('timeline.json', 'w') as f:
-                f.write(ctf)
-            break
-
+    batch_x, batch_y = mnist.train.next_batch(batch_size)
+    # Run optimization op (backprop)
+    sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: dropout})
+    tl = timeline.Timeline(run_metadata.step_stats)
+    ctf = tl.generate_chrome_trace_format()
+    with open('timeline.json', 'w') as f:
+        f.write(ctf)
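
Review note: with the loop removed, the rewritten block above never passes options/run_metadata into sess.run(), so run_metadata.step_stats stays empty and timeline.json is written with an empty trace. A minimal sketch of how the single traced step could be wired up (assuming run_options and run_metadata are the tf.RunOptions/tf.RunMetadata objects created earlier in the session block; a suggestion, not part of this diff):

    # Hypothetical follow-up to the change above: pass the tracing objects into
    # Session.run() so step_stats is actually populated for the timeline.
    sess.run(train_op,
             feed_dict={X: batch_x, Y: batch_y, keep_prob: dropout},
             options=run_options,        # tf.RunOptions(trace_level=FULL_TRACE)
             run_metadata=run_metadata)  # receives per-op timing for this call
    tl = timeline.Timeline(run_metadata.step_stats)
    with open('timeline.json', 'w') as f:
        f.write(tl.generate_chrome_trace_format())

The resulting timeline.json can then be loaded in chrome://tracing to see which kernels (MKL vs. Eigen) the step spends its time in.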