Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/master/C2 - Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/Week 3/improv_utils.py
Views: 4804
import math

import h5py
import numpy as np
import tensorflow as tf

# NOTE(review): the original file used `ops` and `plt` inside model() without
# ever importing them (NameError at runtime) -- imports added here.
import matplotlib.pyplot as plt
from tensorflow.python.framework import ops


def load_dataset():
    """Load the SIGNS train/test sets from HDF5 files under ./datasets.

    Returns:
        train_set_x_orig -- training-set features (numpy array)
        train_set_y_orig -- training-set labels, reshaped to (1, m_train)
        test_set_x_orig  -- test-set features (numpy array)
        test_set_y_orig  -- test-set labels, reshaped to (1, m_test)
        classes          -- numpy array listing the class values
    """
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # Labels arrive as 1-D vectors; reshape to row vectors of shape (1, m).
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    m = X.shape[1]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y) with the same column permutation so pairs stay aligned.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))

    # Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m / mini_batch_size)  # number of mini batches of size mini_batch_size in your partitionning
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size: k * mini_batch_size + mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size: k * mini_batch_size + mini_batch_size]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: m]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches


def convert_to_one_hot(Y, C):
    """Convert a vector of integer labels Y into a one-hot matrix of shape (C, m)."""
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y


def forward_propagation(X, parameters):
    """
    Forward pass of the three-layer net: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR.

    NOTE(review): this function was called by predict() and model() but was
    missing from the original file; implemented here to match the layer shapes
    declared in initialize_parameters().

    Arguments:
    X -- input placeholder/tensor, of shape (input size, number of examples)
    parameters -- dict with tensors "W1", "b1", "W2", "b2", "W3", "b3"

    Returns:
    Z3 -- the output of the last LINEAR unit (logits, no softmax applied)
    """
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    Z1 = tf.add(tf.matmul(W1, X), b1)
    A1 = tf.nn.relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)
    A2 = tf.nn.relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)

    return Z3


def predict(X, parameters):
    """
    Run a forward pass on a single example and return the predicted class index.

    Arguments:
    X -- input example, of shape (12288, 1), fed into a fixed-size placeholder
    parameters -- dict of trained numpy weights "W1".."b3" (converted to tensors here)

    Returns:
    prediction -- argmax over the output logits (the predicted class)
    """
    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation(x, params)
    p = tf.argmax(z3)

    with tf.Session() as sess:
        prediction = sess.run(p, feed_dict={x: X})

    return prediction


def create_placeholders(n_x, n_y):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)

    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"

    Tips:
    - Using None for the second dimension keeps the number of examples flexible,
      since the train and test sets have different sizes.
    """
    X = tf.placeholder("float", [n_x, None])
    Y = tf.placeholder("float", [n_y, None])

    return X, Y


def initialize_parameters():
    """
    Initializes parameters to build a neural network with tensorflow. The shapes are:
        W1 : [25, 12288]
        b1 : [25, 1]
        W2 : [12, 25]
        b2 : [12, 1]
        W3 : [6, 12]
        b3 : [6, 1]

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)  # so that your "random" numbers match ours

    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    return parameters


def compute_cost(z3, Y):
    """
    Computes the cost

    Arguments:
    z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as z3

    Returns:
    cost - Tensor of the cost function
    """
    # tf.nn.softmax_cross_entropy_with_logits expects examples along the first
    # axis, so transpose from (classes, m) to (m, classes).
    logits = tf.transpose(z3)
    labels = tf.transpose(Y)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    return cost


def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,
          num_epochs=1500, minibatch_size=32, print_cost=True):
    """
    Implements a three-layer tensorflow neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- training labels, of shape (output size = 6, number of training examples = 1080)
    X_test -- test set, of shape (input size = 12288, number of test examples = 120)
    Y_test -- test labels, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)      # to keep consistent results
    seed = 3                   # to keep consistent results
    (n_x, m) = X_train.shape   # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]     # n_y : output size
    costs = []                 # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters()

    # Forward propagation: Build the forward propagation in the tensorflow graph
    z3 = forward_propagation(X, parameters)

    # Cost function: Add cost function to tensorflow graph
    cost = compute_cost(z3, Y)

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:

        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):

            minibatch_cost = 0.
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1  # reshuffle differently each epoch, but reproducibly
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # Run the session to execute the optimizer and the cost; the
                # feed dict supplies this minibatch for (X, Y).
                _, temp_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})

                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every 100 epochs, record it every 5 for plotting
            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(minibatch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        return parameters