""" Using convolutional net on MNIST dataset of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import time

import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.examples.tutorials.mnist import input_data

import utils

N_CLASSES = 10

# Step 1: Read in data
# using TF Learn's built in function to load MNIST data into the folder /data/mnist
mnist = input_data.read_data_sets("/data/mnist", one_hot=True)
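
# A quick sanity check one might run after loading (a sketch; these attribute
# names come from the tf.examples DataSet API used above):
#     print(mnist.train.num_examples)   # 55000 training images
#     print(mnist.train.images.shape)   # (55000, 784), flattened 28x28 pixels
#     print(mnist.train.labels.shape)   # (55000, 10), one-hot labels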

# Step 2: Define parameters for the model
LEARNING_RATE = 0.001
BATCH_SIZE = 128
SKIP_STEP = 10
DROPOUT = 0.75
N_EPOCHS = 1

# Step 3: create placeholders for features and labels
# each image in the MNIST data is of shape 28*28 = 784
# therefore, each image is represented with a 1x784 tensor
# We'll be doing dropout for the hidden layer so we'll need a placeholder
# for the dropout probability too
# Use None for shape so we can change the batch_size once we've built the graph
with tf.name_scope('data'):
    X = tf.placeholder(tf.float32, [None, 784], name="X_placeholder")
    Y = tf.placeholder(tf.float32, [None, 10], name="Y_placeholder")

dropout = tf.placeholder(tf.float32, name='dropout')
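
# Note: `dropout` here is the keep probability for tf.nn.dropout below;
# training feeds DROPOUT (0.75) and evaluation feeds 1.0 so nothing is dropped.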

# Step 4 + 5: create weights + do inference
# the model is conv -> relu -> pool -> conv -> relu -> pool -> fully connected -> softmax

global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

with tf.variable_scope('conv1') as scope:
    # first, reshape the image to [BATCH_SIZE, 28, 28, 1] to make it work with tf.nn.conv2d
    images = tf.reshape(X, shape=[-1, 28, 28, 1])
    kernel = tf.get_variable('kernel', [5, 5, 1, 32],
                             initializer=tf.truncated_normal_initializer())
    biases = tf.get_variable('biases', [32],
                             initializer=tf.random_normal_initializer())
    conv = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='SAME')
    conv1 = tf.nn.relu(conv + biases, name=scope.name)

    # output is of dimension BATCH_SIZE x 28 x 28 x 32
    # the one-liner below builds the same conv + relu with tf.contrib.layers
    # and overrides the conv1 built by hand above
    conv1 = layers.conv2d(images, 32, 5, 1, activation_fn=tf.nn.relu, padding='SAME')
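
# In layers.conv2d above, the positional arguments are num_outputs=32,
# kernel_size=5 and stride=1; the layer creates its own weight and bias
# variables under the current variable scope.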

with tf.variable_scope('pool1') as scope:
    pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

# output is of dimension BATCH_SIZE x 14 x 14 x 32
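
# Shape arithmetic for the two layers above (SAME padding): out = ceil(in / stride),
# so conv1 keeps 28 x 28 (stride 1) with 32 channels, and pool1 halves it to
# 14 x 14 x 32 (stride 2).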

with tf.variable_scope('conv2') as scope:
    # similar to conv1, except kernel now is of the size 5 x 5 x 32 x 64
    kernel = tf.get_variable('kernels', [5, 5, 32, 64],
                             initializer=tf.truncated_normal_initializer())
    biases = tf.get_variable('biases', [64],
                             initializer=tf.random_normal_initializer())
    conv = tf.nn.conv2d(pool1, kernel, strides=[1, 1, 1, 1], padding='SAME')
    conv2 = tf.nn.relu(conv + biases, name=scope.name)

    # output is of dimension BATCH_SIZE x 14 x 14 x 64
    # equivalent one-liner with tf.contrib.layers:
    # conv2 = layers.conv2d(pool1, 64, 5, 1, activation_fn=tf.nn.relu, padding='SAME')

with tf.variable_scope('pool2') as scope:
    # similar to pool1
    pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                           padding='SAME')

# output is of dimension BATCH_SIZE x 7 x 7 x 64
91
with tf.variable_scope('fc') as scope:
92
# use weight of dimension 7 * 7 * 64 x 1024
93
input_features = 7 * 7 * 64
94
w = tf.get_variable('weights', [input_features, 1024],
95
initializer=tf.truncated_normal_initializer())
96
b = tf.get_variable('biases', [1024],
97
initializer=tf.constant_initializer(0.0))
98
99
# reshape pool2 to 2 dimensional
100
pool2 = tf.reshape(pool2, [-1, input_features])
101
fc = tf.nn.relu(tf.matmul(pool2, w) + b, name='relu')
102
103
# pool2 = layers.flatten(pool2)
104
# fc = layers.fully_connected(pool2, 1024, tf.nn.relu)
105
106
fc = tf.nn.dropout(fc, dropout, name='relu_dropout')
107
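
# After pool2 is flattened, each image contributes 7 * 7 * 64 = 3136 features,
# which the fully connected layer maps to 1024 hidden units.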

with tf.variable_scope('softmax_linear') as scope:
    w = tf.get_variable('weights', [1024, N_CLASSES],
                        initializer=tf.truncated_normal_initializer())
    b = tf.get_variable('biases', [N_CLASSES],
                        initializer=tf.random_normal_initializer())
    logits = tf.matmul(fc, w) + b
# Step 6: define loss function
119
# use softmax cross entropy with logits as the loss function
120
# compute mean cross entropy, softmax is applied internally
121
with tf.name_scope('loss'):
122
entropy = tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits)
123
loss = tf.reduce_mean(entropy, name='loss')
124
125
with tf.name_scope('summaries'):
126
tf.summary.scalar('loss', loss)
127
tf.summary.histogram('histogram loss', loss)
128
summary_op = tf.summary.merge_all()
129

# Step 7: define training op
# using the Adam optimizer with learning rate of LEARNING_RATE to minimize cost
optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss,
                                                           global_step=global_step)

utils.make_dir('checkpoints')
utils.make_dir('checkpoints/convnet_mnist')
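
# utils is the course's helper module; its make_dir is assumed to be a thin
# wrapper that creates the directory and ignores "already exists" errors,
# roughly:
#     def make_dir(path):
#         try:
#             os.mkdir(path)
#         except OSError:
#             pass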

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # to visualize using TensorBoard
    writer = tf.summary.FileWriter('./graphs/convnet', sess.graph)
    ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/convnet_mnist/checkpoint'))
    # if that checkpoint exists, restore from checkpoint
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    initial_step = global_step.eval()
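
    # The events written by the FileWriter above can be viewed by running
    # `tensorboard --logdir ./graphs/convnet` in a shell and opening the URL it prints.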

    start_time = time.time()
    n_batches = int(mnist.train.num_examples / BATCH_SIZE)

    total_loss = 0.0
    for index in range(initial_step, n_batches * N_EPOCHS):  # train the model n_epochs times
        X_batch, Y_batch = mnist.train.next_batch(BATCH_SIZE)
        _, loss_batch, summary = sess.run([optimizer, loss, summary_op],
                                          feed_dict={X: X_batch, Y: Y_batch, dropout: DROPOUT})
        writer.add_summary(summary, global_step=index)
        total_loss += loss_batch
        if (index + 1) % SKIP_STEP == 0:
            print('Average loss at step {}: {:5.1f}'.format(index + 1, total_loss / SKIP_STEP))
            total_loss = 0.0
            saver.save(sess, 'checkpoints/convnet_mnist/mnist-convnet', index)

    print("Optimization Finished!")  # the loss should be around 0.35 after 25 epochs
    print("Total time: {0} seconds".format(time.time() - start_time))
168
# test the model
169
n_batches = int(mnist.test.num_examples/BATCH_SIZE)
170
total_correct_preds = 0
171
for i in range(n_batches):
172
X_batch, Y_batch = mnist.test.next_batch(BATCH_SIZE)
173
_, loss_batch, logits_batch = sess.run([optimizer, loss, logits],
174
feed_dict={X: X_batch, Y:Y_batch, dropout: 1.0})
175
preds = tf.nn.softmax(logits_batch)
176
correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y_batch, 1))
177
accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))
178
total_correct_preds += sess.run(accuracy)
179
180
print("Accuracy {0}".format(total_correct_preds/mnist.test.num_examples))
181
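
    # A sketch of the same accuracy check done in NumPy instead of adding new
    # softmax/argmax ops to the graph on every test batch (assumes
    # `import numpy as np` at the top of the file):
    #     preds_np = np.argmax(logits_batch, axis=1)
    #     labels_np = np.argmax(Y_batch, axis=1)
    #     total_correct_preds += np.sum(preds_np == labels_np)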