""" Using convolutional net on MNIST dataset of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
from __future__ import print_function
from __future__ import division

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import time

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import utils

N_CLASSES = 10

# Step 1: Read in data
# using TF Learn's built-in function to load MNIST data to the folder data/mnist
mnist = input_data.read_data_sets("/data/mnist", one_hot=True)

# Step 2: Define parameters for the model
LEARNING_RATE = 0.001
BATCH_SIZE = 128
SKIP_STEP = 10
DROPOUT = 0.75
N_EPOCHS = 1

# Step 3: create placeholders for features and labels
# each image in the MNIST data is of shape 28*28 = 784
# therefore, each image is represented with a 1x784 tensor
# We'll be doing dropout for the hidden layer, so we'll need a placeholder
# for the dropout probability too
# Use None for shape so we can change the batch_size once we've built the graph
with tf.name_scope('data'):
    X = tf.placeholder(tf.float32, [None, 784], name="X_placeholder")
    Y = tf.placeholder(tf.float32, [None, 10], name="Y_placeholder")

dropout = tf.placeholder(tf.float32, name='dropout')

# Step 4 + 5: create weights + do inference
# the model is conv -> relu -> pool -> conv -> relu -> pool -> fully connected -> softmax

global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

utils.make_dir('checkpoints')
utils.make_dir('checkpoints/convnet_mnist')

with tf.variable_scope('conv1') as scope:
    # first, reshape the image to [BATCH_SIZE, 28, 28, 1] to make it work with tf.nn.conv2d
    # use the dynamic dimension -1
    images = tf.reshape(X, shape=[-1, 28, 28, 1])

    # TO DO

    # create kernel variable of dimension [5, 5, 1, 32]
    # use tf.truncated_normal_initializer()

    # TO DO

    # create biases variable of dimension [32]
    # use tf.constant_initializer(0.0)

    # TO DO

    # apply tf.nn.conv2d. strides [1, 1, 1, 1], padding is 'SAME'

    # TO DO

    # apply relu on the sum of convolution output and biases

    # TO DO

    # output is of dimension BATCH_SIZE x 28 x 28 x 32
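    # A possible completion of the TO DOs above, following the instructions in the
    # comments and mirroring the conv2 block below (a sketch, not necessarily the
    # official class solution):
    kernel = tf.get_variable('kernels', [5, 5, 1, 32],
                             initializer=tf.truncated_normal_initializer())
    biases = tf.get_variable('biases', [32],
                             initializer=tf.constant_initializer(0.0))
    conv = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='SAME')
    conv1 = tf.nn.relu(conv + biases, name=scope.name)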

with tf.variable_scope('pool1') as scope:
    # apply max pool with ksize [1, 2, 2, 1], and strides [1, 2, 2, 1], padding 'SAME'

    # TO DO

    # output is of dimension BATCH_SIZE x 14 x 14 x 32
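    # A possible completion, mirroring the pool2 block further down:
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')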

with tf.variable_scope('conv2') as scope:
    # similar to conv1, except kernel now is of the size 5 x 5 x 32 x 64
    kernel = tf.get_variable('kernels', [5, 5, 32, 64],
                             initializer=tf.truncated_normal_initializer())
    biases = tf.get_variable('biases', [64],
                             initializer=tf.random_normal_initializer())
    conv = tf.nn.conv2d(pool1, kernel, strides=[1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.relu(conv + biases, name=scope.name)

    # output is of dimension BATCH_SIZE x 14 x 14 x 64

with tf.variable_scope('pool2') as scope:
    # similar to pool1
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

    # output is of dimension BATCH_SIZE x 7 x 7 x 64

with tf.variable_scope('fc') as scope:
    # use weight of dimension 7 * 7 * 64 x 1024
    input_features = 7 * 7 * 64

    # create weights and biases

    # TO DO
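    # A possible completion. The names w and b are chosen here to match the matmul
    # a few lines below; the starter code does not prescribe them.
    w = tf.get_variable('weights', [input_features, 1024],
                        initializer=tf.truncated_normal_initializer())
    b = tf.get_variable('biases', [1024],
                        initializer=tf.constant_initializer(0.0))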

    # reshape pool2 to 2 dimensional
    pool2 = tf.reshape(pool2, [-1, input_features])

    # apply relu on matmul of pool2 and w + b
    fc = tf.nn.relu(tf.matmul(pool2, w) + b, name='relu')

    # TO DO

    # apply dropout
    fc = tf.nn.dropout(fc, dropout, name='relu_dropout')

with tf.variable_scope('softmax_linear') as scope:
    # this you should know. get logits without softmax
    # you need to create weights and biases

    # TO DO
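    # A possible completion: a plain affine layer producing the class logits.
    # The name `logits` is what the loss and test code below expect.
    w = tf.get_variable('weights', [1024, N_CLASSES],
                        initializer=tf.truncated_normal_initializer())
    b = tf.get_variable('biases', [N_CLASSES],
                        initializer=tf.constant_initializer(0.0))
    logits = tf.matmul(fc, w) + b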

# Step 6: define loss function
# use softmax cross entropy with logits as the loss function
# compute mean cross entropy, softmax is applied internally
with tf.name_scope('loss'):
    # you should know how to do this too

    # TO DO
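    # A possible completion, computing the mean softmax cross entropy described above:
    entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y)
    loss = tf.reduce_mean(entropy, name='loss')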

# Step 7: define training op
# using gradient descent with learning rate of LEARNING_RATE to minimize cost
# don't forget to pass in global_step

# TO DO
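# A possible completion. The training loop below runs an op named `optimizer`,
# so that name is used here; plain gradient descent follows the comment above,
# though another optimizer (e.g. Adam) would also work.
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(
    loss, global_step=global_step)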

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # to visualize using TensorBoard
    writer = tf.summary.FileWriter('./my_graph/mnist', sess.graph)
    ##### You have to create folders to store checkpoints
    ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/convnet_mnist/checkpoint'))
    # if that checkpoint exists, restore from checkpoint
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    initial_step = global_step.eval()

    start_time = time.time()
    n_batches = int(mnist.train.num_examples / BATCH_SIZE)

    total_loss = 0.0
    for index in range(initial_step, n_batches * N_EPOCHS):  # train the model n_epochs times
        X_batch, Y_batch = mnist.train.next_batch(BATCH_SIZE)
        _, loss_batch = sess.run([optimizer, loss],
                                 feed_dict={X: X_batch, Y: Y_batch, dropout: DROPOUT})
        total_loss += loss_batch
        if (index + 1) % SKIP_STEP == 0:
            print('Average loss at step {}: {:5.1f}'.format(index + 1, total_loss / SKIP_STEP))
            total_loss = 0.0
            saver.save(sess, 'checkpoints/convnet_mnist/mnist-convnet', index)

    print("Optimization Finished!")  # should be around 0.35 after 25 epochs
    print("Total time: {0} seconds".format(time.time() - start_time))

    # test the model
    n_batches = int(mnist.test.num_examples / BATCH_SIZE)
    total_correct_preds = 0
    for i in range(n_batches):
        X_batch, Y_batch = mnist.test.next_batch(BATCH_SIZE)
        # evaluation only: don't run the optimizer on test data, and disable dropout
        logits_batch = sess.run(logits,
                                feed_dict={X: X_batch, Y: Y_batch, dropout: 1.0})
        preds = tf.nn.softmax(logits_batch)
        correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))
        accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
        total_correct_preds += sess.run(accuracy)

    print("Accuracy {0}".format(total_correct_preds / mnist.test.num_examples))