cocalc-examples / stanford-tensorflow-tutorials / 2017 / examples / 03_logistic_regression_mnist_starter.py
""" Starter code for logistic regression model to solve OCR task1with MNIST in TensorFlow2MNIST dataset: yann.lecun.com/exdb/mnist/3Author: Chip Huyen4Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"5cs20si.stanford.edu6"""7import os8os.environ['TF_CPP_MIN_LOG_LEVEL']='2'910import numpy as np11import tensorflow as tf12from tensorflow.examples.tutorials.mnist import input_data13import time1415# Define paramaters for the model16learning_rate = 0.0117batch_size = 12818n_epochs = 101920# Step 1: Read in data21# using TF Learn's built in function to load MNIST data to the folder data/mnist22mnist = input_data.read_data_sets('/data/mnist', one_hot=True)2324# Step 2: create placeholders for features and labels25# each image in the MNIST data is of shape 28*28 = 78426# therefore, each image is represented with a 1x784 tensor27# there are 10 classes for each image, corresponding to digits 0 - 9.28# Features are of the type float, and labels are of the type int293031# Step 3: create weights and bias32# weights and biases are initialized to 033# shape of w depends on the dimension of X and Y so that Y = X * w + b34# shape of b depends on Y353637# Step 4: build model38# the model that returns the logits.39# this logits will be later passed through softmax layer40# to get the probability distribution of possible label of the image41# DO NOT DO SOFTMAX HERE424344# Step 5: define loss function45# use cross entropy loss of the real labels with the softmax of logits46# use the method:47# tf.nn.softmax_cross_entropy_with_logits(logits, Y)48# then use tf.reduce_mean to get the mean loss of the batch495051# Step 6: define training op52# using gradient descent to minimize loss535455with tf.Session() as sess:56start_time = time.time()57sess.run(tf.global_variables_initializer())58n_batches = int(mnist.train.num_examples/batch_size)59for i in range(n_epochs): # train the model n_epochs times60total_loss = 06162for _ in range(n_batches):63X_batch, Y_batch = mnist.train.next_batch(batch_size)64# TO-DO: run optimizer + fetch loss_batch65#66#67total_loss += loss_batch68print('Average loss epoch {0}: {1}'.format(i, total_loss/n_batches))6970print('Total time: {0} seconds'.format(time.time() - start_time))7172print('Optimization Finished!') # should be around 0.35 after 25 epochs7374# test the model75preds = tf.nn.softmax(logits)76correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))77accuracy = tf.reduce_sum(tf.cast(correct_preds, tf.float32)) # need numpy.count_nonzero(boolarr) :(7879n_batches = int(mnist.test.num_examples/batch_size)80total_correct_preds = 08182for i in range(n_batches):83X_batch, Y_batch = mnist.test.next_batch(batch_size)84accuracy_batch = sess.run([accuracy], feed_dict={X: X_batch, Y:Y_batch})85total_correct_preds += accuracy_batch8687print('Accuracy {0}'.format(total_correct_preds/mnist.test.num_examples))888990