📚 The CoCalc Library - books, templates and other resources

""" Simple logistic regression model to solve the OCR task
with MNIST in TensorFlow
MNIST dataset: yann.lecun.com/exdb/mnist/
Author: Chip Huyen
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import time

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Define parameters for the model
learning_rate = 0.01
batch_size = 128
n_epochs = 30

# Step 1: Read in data
# using TF Learn's built-in function to load MNIST data into the folder data/mnist
mnist = input_data.read_data_sets('data/mnist', one_hot=True)
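
# Note: the tensorflow.examples.tutorials module was removed in later
# TensorFlow releases. A rough equivalent using tf.keras.datasets (a sketch,
# not part of the original course code) would be:
#
#   (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#   x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0  # flatten and scale
#   y_train = np.eye(10, dtype=np.float32)[y_train]                # one-hot labels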

# Step 2: create placeholders for features and labels
# each image in the MNIST data is of shape 28*28 = 784,
# so each image is represented with a 1x784 tensor
# there are 10 classes for each image, corresponding to digits 0-9;
# each label is a one-hot vector
X = tf.placeholder(tf.float32, [batch_size, 784], name='X_placeholder')
Y = tf.placeholder(tf.int32, [batch_size, 10], name='Y_placeholder')
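
# Note: fixing the batch dimension to batch_size means every batch fed in must
# contain exactly 128 examples, so a final partial batch cannot be evaluated.
# A common alternative (an assumption, not what this script does) is a
# variable batch dimension:
#
#   X = tf.placeholder(tf.float32, [None, 784], name='X_placeholder')
#   Y = tf.placeholder(tf.int32, [None, 10], name='Y_placeholder')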

# Step 3: create weights and bias
# w is initialized to random values with mean of 0, stddev of 0.01
# b is initialized to 0
# shape of w depends on the dimensions of X and Y so that Y = tf.matmul(X, w)
# shape of b depends on Y
w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')
b = tf.Variable(tf.zeros([1, 10]), name='bias')

# Step 4: build model
# the model returns the logits;
# these logits will later be passed through a softmax layer
logits = tf.matmul(X, w) + b
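
# Note: softmax is deliberately not applied here. The loss op below,
# tf.nn.softmax_cross_entropy_with_logits, expects unnormalized logits and
# applies softmax internally, which is more numerically stable than computing
# the softmax and the cross entropy separately.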

# Step 5: define loss function
# use cross entropy of softmax of logits as the loss function
entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')
loss = tf.reduce_mean(entropy)  # computes the mean over all the examples in the batch
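
# Note: in later TensorFlow 1.x releases this op was deprecated in favor of
# tf.nn.softmax_cross_entropy_with_logits_v2, which additionally lets
# gradients flow into the labels; for fixed one-hot labels fed through a
# placeholder the computed loss is the same:
#
#   entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y)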

# Step 6: define training op
# using the Adam optimizer with learning rate of 0.01 to minimize loss
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
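
# If plain gradient descent is preferred instead of Adam (a variant, not what
# this script uses), the training op would be:
#
#   optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)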

with tf.Session() as sess:
    # to visualize using TensorBoard
    writer = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)

    start_time = time.time()
    sess.run(tf.global_variables_initializer())
    n_batches = int(mnist.train.num_examples / batch_size)
    for i in range(n_epochs):  # train the model n_epochs times
        total_loss = 0

        for _ in range(n_batches):
            X_batch, Y_batch = mnist.train.next_batch(batch_size)
            _, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y: Y_batch})
            total_loss += loss_batch
        print('Average loss epoch {0}: {1}'.format(i, total_loss / n_batches))

    print('Total time: {0} seconds'.format(time.time() - start_time))

    print('Optimization Finished!')  # average loss should be around 0.35 after 25 epochs

    # test the model
    preds = tf.nn.softmax(logits)
    correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32))  # number of correct predictions in the batch

    n_batches = int(mnist.test.num_examples / batch_size)
    total_correct_preds = 0

    for i in range(n_batches):
        X_batch, Y_batch = mnist.test.next_batch(batch_size)
        accuracy_batch = sess.run(accuracy, feed_dict={X: X_batch, Y: Y_batch})
        total_correct_preds += accuracy_batch

    print('Accuracy {0}'.format(total_correct_preds / mnist.test.num_examples))

    writer.close()
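
# To inspect the computation graph after a run, point TensorBoard at the log
# directory written above (assumes the tensorboard command that ships with
# TensorFlow):
#
#   tensorboard --logdir=./graphs/logistic_reg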