
Credits: Forked from deep-learning-keras-tensorflow by Valerio Maggio

Convolution Nets for MNIST

Deep learning models can take quite a bit of time to run, particularly if a GPU isn't used.

In the interest of time, you could sample a subset of observations (e.g. 1000) that are a particular number of your choice (e.g. 6) and 1000 observations that aren't that particular number (i.e. ≠ 6).

We will build a model using that subset and see how it performs on the test dataset.

#Import the required libraries
import numpy as np
np.random.seed(1338)

from keras.datasets import mnist
Using Theano backend.
Using gpu device 0: GeForce GTX 760 (CNMeM is enabled with initial size: 90.0% of memory, cuDNN 4007)
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D

from keras.utils import np_utils
from keras.optimizers import SGD

Loading Data

path_to_dataset = "euroscipy_2016_dl-keras/data/mnist.pkl.gz"

#Load the training and testing data
(X_train, y_train), (X_test, y_test) = mnist.load_data(path_to_dataset)
X_test_orig = X_test

Data Preparation

img_rows, img_cols = 28, 28

X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

X_train /= 255
X_test /= 255
# Seed for reproducibility
np.random.seed(1338)

# Test data
X_test = X_test.copy()
Y = y_test.copy()

# Converting the output to binary classification (Six=1, Not Six=0)
Y_test = Y == 6
Y_test = Y_test.astype(int)

# Selecting the 5918 examples where the output is 6
X_six = X_train[y_train == 6].copy()
Y_six = y_train[y_train == 6].copy()

# Selecting the examples where the output is not 6
X_not_six = X_train[y_train != 6].copy()
Y_not_six = y_train[y_train != 6].copy()

# Selecting 6000 random examples from the data that
# only contains the data where the output is not 6
# (note: the indices must range over X_not_six, not X_six)
random_rows = np.random.randint(0, X_not_six.shape[0], 6000)
X_not_six = X_not_six[random_rows]
Y_not_six = Y_not_six[random_rows]
# Appending the data with output as 6 and data with output as not 6
X_train = np.append(X_six, X_not_six)

# Reshaping the appended data to the appropriate form
X_train = X_train.reshape(X_six.shape[0] + X_not_six.shape[0], 1, img_rows, img_cols)

# Appending the labels and converting the labels to
# binary classification (Six=1, Not Six=0)
Y_labels = np.append(Y_six, Y_not_six)
Y_train = Y_labels == 6
Y_train = Y_train.astype(int)
print(X_train.shape, Y_labels.shape, X_test.shape, Y_test.shape)
(11918, 1, 28, 28) (11918,) (10000, 1, 28, 28) (10000, 2)
# Converting the classes to their binary categorical form
nb_classes = 2
Y_train = np_utils.to_categorical(Y_train, nb_classes)
Y_test = np_utils.to_categorical(Y_test, nb_classes)
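As a quick illustration of what to_categorical does: each integer label becomes a one-hot row, so label 0 maps to [1, 0] and label 1 maps to [0, 1].

print(np_utils.to_categorical([0, 1, 1], 2))
# [[ 1.  0.]
#  [ 0.  1.]
#  [ 0.  1.]]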

A simple CNN

#Initializing the values for the convolution neural network
nb_epoch = 2
batch_size = 128
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3

sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)

Step 1: Model Definition

model = Sequential()

model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))

model.add(Flatten())
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
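Before compiling, it can help to sanity-check the architecture. Keras' built-in model.summary() prints each layer's output shape and parameter count:

model.summary()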

Step 2: Compile

model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
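Note that passing the string 'sgd' tells Keras to use SGD with its default settings; the custom sgd instance defined above (lr=0.1, momentum=0.9, Nesterov) is never actually used. To train with those settings instead, pass the object itself:

model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])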

Step 3: Fit

model.fit(X_train, Y_train, batch_size=batch_size,
          nb_epoch=nb_epoch, verbose=1,
          validation_data=(X_test, Y_test))
Train on 11918 samples, validate on 10000 samples
Epoch 1/2
11918/11918 [==============================] - 0s - loss: 0.2890 - acc: 0.9326 - val_loss: 0.1251 - val_acc: 0.9722
Epoch 2/2
11918/11918 [==============================] - 0s - loss: 0.1341 - acc: 0.9612 - val_loss: 0.1298 - val_acc: 0.9599
<keras.callbacks.History at 0x7f6ccb68f630>

Step 4: Evaluate

# Evaluating the model on the test data
score, accuracy = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score)
print('Test accuracy:', accuracy)
Test score: 0.129807630396
Test accuracy: 0.9599

Let's plot our model predictions!

import matplotlib.pyplot as plt
%matplotlib inline
slice = 15
predicted = model.predict(X_test[:slice]).argmax(-1)

plt.figure(figsize=(16, 8))
for i in range(slice):
    plt.subplot(1, slice, i+1)
    plt.imshow(X_test_orig[i], interpolation='nearest')
    plt.text(0, 0, predicted[i], color='black',
             bbox=dict(facecolor='white', alpha=1))
    plt.axis('off')
[Figure: the first 15 test digits, each annotated with the model's predicted class (1 = six, 0 = not six)]

Adding more Dense Layers

model = Sequential()

model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=batch_size,
          nb_epoch=nb_epoch, verbose=1,
          validation_data=(X_test, Y_test))
Train on 11918 samples, validate on 10000 samples
Epoch 1/2
11918/11918 [==============================] - 0s - loss: 0.3044 - acc: 0.9379 - val_loss: 0.1469 - val_acc: 0.9625
Epoch 2/2
11918/11918 [==============================] - 0s - loss: 0.1189 - acc: 0.9640 - val_loss: 0.1058 - val_acc: 0.9655
<keras.callbacks.History at 0x7f6cf59f7358>
#Evaluating the model on the test data
score, accuracy = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score)
print('Test accuracy:', accuracy)
Test score: 0.105762729073
Test accuracy: 0.9655

Adding Dropout

model = Sequential()

model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=batch_size,
          nb_epoch=nb_epoch, verbose=1,
          validation_data=(X_test, Y_test))
Train on 11918 samples, validate on 10000 samples
Epoch 1/2
11918/11918 [==============================] - 0s - loss: 0.3128 - acc: 0.9097 - val_loss: 0.1438 - val_acc: 0.9624
Epoch 2/2
11918/11918 [==============================] - 0s - loss: 0.1362 - acc: 0.9580 - val_loss: 0.1145 - val_acc: 0.9628
<keras.callbacks.History at 0x7f6ccb180208>
#Evaluating the model on the test data
score, accuracy = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score)
print('Test accuracy:', accuracy)
Test score: 0.11448907243
Test accuracy: 0.9628

Adding more Convolution Layers

model = Sequential()

model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

model.fit(X_train, Y_train, batch_size=batch_size,
          nb_epoch=nb_epoch, verbose=1,
          validation_data=(X_test, Y_test))
Train on 11918 samples, validate on 10000 samples
Epoch 1/2
11918/11918 [==============================] - 1s - loss: 0.4707 - acc: 0.8288 - val_loss: 0.2307 - val_acc: 0.9399
Epoch 2/2
11918/11918 [==============================] - 1s - loss: 0.1882 - acc: 0.9383 - val_loss: 0.1195 - val_acc: 0.9621
<keras.callbacks.History at 0x7f6cc97b8748>
#Evaluating the model on the test data
score, accuracy = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score)
print('Test accuracy:', accuracy)
Test score: 0.11954063682
Test accuracy: 0.9621

Exercise

The code above has been wrapped into a single function below.

Change some of the hyperparameters and see what happens.

# Function for constructing the convolution neural network
# Feel free to add parameters, if you want
def build_model():
    """Build, train, and evaluate the CNN on the six / not-six task."""
    model = Sequential()
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    model.fit(X_train, Y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=1,
              validation_data=(X_test, Y_test))

    #Evaluating the model on the test data
    score, accuracy = model.evaluate(X_test, Y_test, verbose=0)
    print('Test score:', score)
    print('Test accuracy:', accuracy)
#Timing how long it takes to build the model and test it.
%timeit -n1 -r1 build_model()
Train on 11918 samples, validate on 10000 samples
Epoch 1/2
11918/11918 [==============================] - 1s - loss: 0.5634 - acc: 0.7860 - val_loss: 0.3574 - val_acc: 0.9363
Epoch 2/2
11918/11918 [==============================] - 1s - loss: 0.2372 - acc: 0.9292 - val_loss: 0.2253 - val_acc: 0.9190
Test score: 0.225333989978
Test accuracy: 0.919
1 loop, best of 1: 5.45 s per loop
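As one possible starting point for the exercise, here is a sketch of the function with a few hyperparameters pulled out as arguments. The parameter names (n_filters, kernel_size, dense_units, dropout) are our own additions for illustration, not part of the original notebook:

def build_model(n_filters=32, kernel_size=3, dense_units=128, dropout=0.5):
    """Illustrative variant of build_model with tunable hyperparameters."""
    model = Sequential()
    model.add(Convolution2D(n_filters, kernel_size, kernel_size,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(dense_units))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=['accuracy'])
    model.fit(X_train, Y_train, batch_size=batch_size,
              nb_epoch=nb_epoch, verbose=1,
              validation_data=(X_test, Y_test))
    return model

# e.g. try more filters and a lighter dropout:
# build_model(n_filters=64, dropout=0.25)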

Batch Normalisation

Batch normalization normalizes the activations of the previous layer at each batch, i.e. it applies a transformation that keeps the mean activation close to 0 and the activation standard deviation close to 1.
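Concretely, for each mini-batch the layer computes the standard batch-norm transform, where μ and σ² are the batch mean and variance, ε is a small constant for numerical stability, and γ, β are learned scale and shift parameters:

$$\hat{x} = \frac{x - \mu_{\mathcal{B}}}{\sqrt{\sigma^2_{\mathcal{B}} + \epsilon}}, \qquad y = \gamma \hat{x} + \beta$$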

How to BatchNorm in Keras

from keras.layers.normalization import BatchNormalization

BatchNormalization(epsilon=1e-06, mode=0, axis=-1, momentum=0.99, weights=None,
                   beta_init='zero', gamma_init='one')
# Try to add a new BatchNormalization layer to the Model
# (after the Dropout layer)
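One possible solution sketch (the placement follows the hint above; this is our own illustrative answer, not the original notebook's):

model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# BatchNormalization inserted after the Dropout layer, per the exercise hint
model.add(BatchNormalization())
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='sgd',
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size,
          nb_epoch=nb_epoch, verbose=1,
          validation_data=(X_test, Y_test))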