Path: blob/master/C4 - Convolutional Neural Networks/Week 2/ResNets/resnets_utils.py
import os
import numpy as np
import tensorflow as tf
import h5py
import math


def load_dataset():
    train_dataset = h5py.File('datasets/train_signs.h5', "r")
    # your train set features
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])
    train_set_y_orig = np.array(
        train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_signs.h5', "r")
    # your test set features
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])
    test_set_y_orig = np.array(
        test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """
    Creates a list of random minibatches from (X, Y)

    Arguments:
    X -- input data, of shape (m, Hi, Wi, Ci)
    Y -- true "label" vector, of shape (m, n_y)
    mini_batch_size -- size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """

    m = X.shape[0]  # number of training examples
    mini_batches = []
    np.random.seed(seed)

    # Step 1: Shuffle (X, Y)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[permutation, :, :, :]
    shuffled_Y = Y[permutation, :]

    # Step 2: Partition (shuffled_X, shuffled_Y), minus the end case.
    # number of mini-batches of size mini_batch_size in your partitioning
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[k * mini_batch_size:
                                  k * mini_batch_size + mini_batch_size, :, :, :]
        mini_batch_Y = shuffled_Y[k * mini_batch_size:
                                  k * mini_batch_size + mini_batch_size, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size:m, :, :, :]
        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size:m, :]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)

    return mini_batches


def convert_to_one_hot(Y, C):
    Y = np.eye(C)[Y.reshape(-1)].T
    return Y


def forward_propagation_for_predict(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    # Numpy equivalents of each op are given in the trailing comments.
    Z1 = tf.add(tf.matmul(W1, X), b1)   # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                 # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)  # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                 # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)  # Z3 = np.dot(W3, A2) + b3

    return Z3


def predict(X, parameters):
    """
    Runs forward propagation on a single flattened example X of shape (12288, 1)
    and returns the index of the most likely class.
    """
    W1 = tf.convert_to_tensor(parameters["W1"])
    b1 = tf.convert_to_tensor(parameters["b1"])
    W2 = tf.convert_to_tensor(parameters["W2"])
    b2 = tf.convert_to_tensor(parameters["b2"])
    W3 = tf.convert_to_tensor(parameters["W3"])
    b3 = tf.convert_to_tensor(parameters["b3"])

    params = {"W1": W1,
              "b1": b1,
              "W2": W2,
              "b2": b2,
              "W3": W3,
              "b3": b3}

    x = tf.placeholder("float", [12288, 1])

    z3 = forward_propagation_for_predict(x, params)
    p = tf.argmax(z3)

    sess = tf.Session()
    prediction = sess.run(p, feed_dict={x: X})

    return prediction
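
For reference, a minimal sketch of how these helpers are typically wired together in a notebook that imports resnets_utils. The 6-class labels, the division by 255, and the batch size of 32 are assumptions for illustration, not part of this file; the datasets/*.h5 files must exist for load_dataset to work.

from resnets_utils import load_dataset, convert_to_one_hot, random_mini_batches

# Load the hand-sign images and scale pixel values to [0, 1] (assumed preprocessing).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
X_train = X_train_orig / 255.
X_test = X_test_orig / 255.

# Turn the (1, m) label vectors into one-hot matrices of shape (m, n_classes);
# 6 classes is an assumption based on the usual SIGNS dataset.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T

# Iterate over shuffled mini-batches during one training epoch.
for mini_batch_X, mini_batch_Y in random_mini_batches(X_train, Y_train, mini_batch_size=32, seed=0):
    pass  # run one optimization step on (mini_batch_X, mini_batch_Y) here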