📚 The CoCalc Library - books, templates and other resources
License: OTHER
""" Using convolutional net on MNIST dataset of handwritten digit1(http://yann.lecun.com/exdb/mnist/)2Author: Chip Huyen3Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"4cs20si.stanford.edu5"""6from __future__ import print_function7from __future__ import division8from __future__ import print_function910import os11os.environ['TF_CPP_MIN_LOG_LEVEL']='2'1213import time1415import tensorflow as tf16from tensorflow.examples.tutorials.mnist import input_data1718import utils1920N_CLASSES = 102122# Step 1: Read in data23# using TF Learn's built in function to load MNIST data to the folder data/mnist24mnist = input_data.read_data_sets("/data/mnist", one_hot=True)2526# Step 2: Define paramaters for the model27LEARNING_RATE = 0.00128BATCH_SIZE = 12829SKIP_STEP = 1030DROPOUT = 0.7531N_EPOCHS = 13233# Step 3: create placeholders for features and labels34# each image in the MNIST data is of shape 28*28 = 78435# therefore, each image is represented with a 1x784 tensor36# We'll be doing dropout for hidden layer so we'll need a placeholder37# for the dropout probability too38# Use None for shape so we can change the batch_size once we've built the graph39with tf.name_scope('data'):40X = tf.placeholder(tf.float32, [None, 784], name="X_placeholder")41Y = tf.placeholder(tf.float32, [None, 10], name="Y_placeholder")4243dropout = tf.placeholder(tf.float32, name='dropout')4445# Step 4 + 5: create weights + do inference46# the model is conv -> relu -> pool -> conv -> relu -> pool -> fully connected -> softmax4748global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')4950utils.make_dir('checkpoints')51utils.make_dir('checkpoints/convnet_mnist')5253with tf.variable_scope('conv1') as scope:54# first, reshape the image to [BATCH_SIZE, 28, 28, 1] to make it work with tf.nn.conv2d55# use the dynamic dimension -156images = tf.reshape(X, shape=[-1, 28, 28, 1])5758# TO DO5960# create kernel variable of dimension [5, 5, 1, 32]61# use tf.truncated_normal_initializer()6263# TO DO6465# create biases variable of dimension [32]66# use tf.constant_initializer(0.0)6768# TO DO6970# apply tf.nn.conv2d. 
with tf.variable_scope('pool1') as scope:
    # apply max pool with ksize [1, 2, 2, 1], strides [1, 2, 2, 1], padding 'SAME'
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    # output is of dimension BATCH_SIZE x 14 x 14 x 32

with tf.variable_scope('conv2') as scope:
    # similar to conv1, except the kernel now is of size 5 x 5 x 32 x 64
    kernel = tf.get_variable('kernels', [5, 5, 32, 64],
                             initializer=tf.truncated_normal_initializer())
    biases = tf.get_variable('biases', [64],
                             initializer=tf.random_normal_initializer())
    conv = tf.nn.conv2d(pool1, kernel, strides=[1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.relu(conv + biases, name=scope.name)

    # output is of dimension BATCH_SIZE x 14 x 14 x 64

with tf.variable_scope('pool2') as scope:
    # similar to pool1
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    # output is of dimension BATCH_SIZE x 7 x 7 x 64

with tf.variable_scope('fc') as scope:
    # use weights of dimension 7 * 7 * 64 x 1024
    input_features = 7 * 7 * 64

    # create weights and biases
    w = tf.get_variable('weights', [input_features, 1024],
                        initializer=tf.truncated_normal_initializer())
    b = tf.get_variable('biases', [1024],
                        initializer=tf.constant_initializer(0.0))

    # reshape pool2 to 2 dimensional
    pool2 = tf.reshape(pool2, [-1, input_features])

    # apply relu on matmul of pool2 and w + b
    fc = tf.nn.relu(tf.matmul(pool2, w) + b, name='relu')

    # apply dropout
    fc = tf.nn.dropout(fc, dropout, name='relu_dropout')

with tf.variable_scope('softmax_linear') as scope:
    # this you should know: get logits without softmax;
    # you need to create weights and biases
    # (the initializers below mirror the earlier layers; the exercise
    # leaves the choice open)
    w = tf.get_variable('weights', [1024, N_CLASSES],
                        initializer=tf.truncated_normal_initializer())
    b = tf.get_variable('biases', [N_CLASSES],
                        initializer=tf.random_normal_initializer())
    logits = tf.matmul(fc, w) + b

# Step 6: define loss function
# use softmax cross entropy with logits as the loss function;
# compute mean cross entropy, softmax is applied internally
with tf.name_scope('loss'):
    entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
    loss = tf.reduce_mean(entropy, name='loss')

# Step 7: define training op
# using gradient descent with learning rate of LEARNING_RATE to minimize cost;
# don't forget to pass in global_step
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
    loss, global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # to visualize using TensorBoard
    writer = tf.summary.FileWriter('./my_graph/mnist', sess.graph)
    ##### You have to create folders to store checkpoints
    ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/convnet_mnist/checkpoint'))
    # if that checkpoint exists, restore from checkpoint
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    initial_step = global_step.eval()

    start_time = time.time()
    n_batches = int(mnist.train.num_examples / BATCH_SIZE)

    total_loss = 0.0
    for index in range(initial_step, n_batches * N_EPOCHS):  # train the model n_epochs times
        X_batch, Y_batch = mnist.train.next_batch(BATCH_SIZE)
        _, loss_batch = sess.run([optimizer, loss],
                                 feed_dict={X: X_batch, Y: Y_batch, dropout: DROPOUT})
        total_loss += loss_batch
        if (index + 1) % SKIP_STEP == 0:
            print('Average loss at step {}: {:5.1f}'.format(index + 1, total_loss / SKIP_STEP))
            total_loss = 0.0
            saver.save(sess, 'checkpoints/convnet_mnist/mnist-convnet', global_step=index)

    print("Optimization Finished!")  # loss should be around 0.35 after 25 epochs
    print("Total time: {0} seconds".format(time.time() - start_time))
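    # Alternative evaluation (editor's sketch, not from the starter code):
    # the loop below creates fresh softmax/argmax ops for every test batch,
    # which silently grows the graph. Building a single accuracy op up front,
    # e.g.
    #   preds = tf.nn.softmax(logits)
    #   correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
    #   accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
    # and calling sess.run(accuracy, ...) per batch avoids that.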
    # test the model
    n_batches = int(mnist.test.num_examples / BATCH_SIZE)
    total_correct_preds = 0
    for i in range(n_batches):
        X_batch, Y_batch = mnist.test.next_batch(BATCH_SIZE)
        # evaluation only: don't run the optimizer (that would train on the
        # test set), and turn dropout off with a keep probability of 1.0
        logits_batch = sess.run(logits,
                                feed_dict={X: X_batch, Y: Y_batch, dropout: 1.0})
        preds = tf.nn.softmax(logits_batch)
        correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))
        accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
        total_correct_preds += sess.run(accuracy)

    print("Accuracy {0}".format(total_correct_preds / mnist.test.num_examples))
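# Usage (editor's note): run this script in a TensorFlow 1.x environment;
# it trains for N_EPOCHS, checkpoints to checkpoints/convnet_mnist/, and then
# reports test accuracy. The graph written by the FileWriter above can be
# inspected with:
#   tensorboard --logdir=./my_graph/mnist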