cocalc-examples / stanford-tensorflow-tutorials / 2017 / examples / 03_logistic_regression_mnist_sol.py
""" Simple logistic regression model to solve OCR task1with MNIST in TensorFlow2MNIST dataset: yann.lecun.com/exdb/mnist/3Author: Chip Huyen4Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"5cs20si.stanford.edu6"""7import os8os.environ['TF_CPP_MIN_LOG_LEVEL']='2'910import numpy as np11import tensorflow as tf12from tensorflow.examples.tutorials.mnist import input_data13import time1415# Define paramaters for the model16learning_rate = 0.0117batch_size = 12818n_epochs = 301920# Step 1: Read in data21# using TF Learn's built in function to load MNIST data to the folder data/mnist22mnist = input_data.read_data_sets('/data/mnist', one_hot=True)2324# Step 2: create placeholders for features and labels25# each image in the MNIST data is of shape 28*28 = 78426# therefore, each image is represented with a 1x784 tensor27# there are 10 classes for each image, corresponding to digits 0 - 9.28# each lable is one hot vector.29X = tf.placeholder(tf.float32, [batch_size, 784], name='X_placeholder')30Y = tf.placeholder(tf.int32, [batch_size, 10], name='Y_placeholder')3132# Step 3: create weights and bias33# w is initialized to random variables with mean of 0, stddev of 0.0134# b is initialized to 035# shape of w depends on the dimension of X and Y so that Y = tf.matmul(X, w)36# shape of b depends on Y37w = tf.Variable(tf.random_normal(shape=[784, 10], stddev=0.01), name='weights')38b = tf.Variable(tf.zeros([1, 10]), name="bias")3940# Step 4: build model41# the model that returns the logits.42# this logits will be later passed through softmax layer43logits = tf.matmul(X, w) + b4445# Step 5: define loss function46# use cross entropy of softmax of logits as the loss function47entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y, name='loss')48loss = tf.reduce_mean(entropy) # computes the mean over all the examples in the batch4950# Step 6: define training op51# using gradient descent with learning rate of 0.01 to minimize loss52optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)5354with tf.Session() as sess:55# to visualize using TensorBoard56writer = tf.summary.FileWriter('./graphs/logistic_reg', sess.graph)5758start_time = time.time()59sess.run(tf.global_variables_initializer())60n_batches = int(mnist.train.num_examples/batch_size)61for i in range(n_epochs): # train the model n_epochs times62total_loss = 06364for _ in range(n_batches):65X_batch, Y_batch = mnist.train.next_batch(batch_size)66_, loss_batch = sess.run([optimizer, loss], feed_dict={X: X_batch, Y:Y_batch})67total_loss += loss_batch68print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))6970print('Total time: {0} seconds'.format(time.time() - start_time))7172print('Optimization Finished!') # should be around 0.35 after 25 epochs7374# test the model7576preds = tf.nn.softmax(logits)77correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))78accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(7980n_batches = int(mnist.test.num_examples/batch_size)81total_correct_preds = 08283for i in range(n_batches):84X_batch, Y_batch = mnist.test.next_batch(batch_size)85accuracy_batch = sess.run([accuracy], feed_dict={X: X_batch, Y:Y_batch})86total_correct_preds += accuracy_batch8788print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))8990writer.close()919293