Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/master/Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week6/opt_utils_v1a.py
Views: 13370
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy.io
import sklearn
import sklearn.datasets


def sigmoid(x):
    """
    Compute the sigmoid of x.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x)
    """
    s = 1 / (1 + np.exp(-x))
    return s


def relu(x):
    """
    Compute the relu of x.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x)
    """
    s = np.maximum(0, x)

    return s


def load_params_and_grads(seed=1):
    """
    Return a fixed random set of 2-layer parameters and matching gradients.

    Used to exercise optimizer update rules with reproducible values.

    Arguments:
    seed -- int, seed for numpy's RNG so the values are deterministic.

    Returns:
    W1, b1, W2, b2 -- parameter arrays of shapes (2,3), (2,1), (3,3), (3,1)
    dW1, db1, dW2, db2 -- gradient arrays with the same shapes as above
    """
    np.random.seed(seed)
    W1 = np.random.randn(2, 3)
    b1 = np.random.randn(2, 1)
    W2 = np.random.randn(3, 3)
    b2 = np.random.randn(3, 1)

    dW1 = np.random.randn(2, 3)
    db1 = np.random.randn(2, 1)
    dW2 = np.random.randn(3, 3)
    db2 = np.random.randn(3, 1)

    return W1, b1, W2, b2, dW1, db1, dW2, db2


def initialize_parameters(layer_dims):
    """
    Initialize weights with He initialization and biases with zeros.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in our network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)

    Tips:
    - In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
    """
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)  # number of layers in the network

    for l in range(1, L):
        # He initialization: scale by sqrt(2 / fan-in), suited to ReLU activations.
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * np.sqrt(2 / layer_dims[l-1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))

        # BUGFIX: the original second assert re-checked W's first dimension and
        # never validated b; now W and b shapes are each checked in full.
        assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])
        assert parameters['b' + str(l)].shape == (layer_dims[l], 1)

    return parameters


def compute_cost(a3, Y):
    """
    Implement the cost function.

    Arguments:
    a3 -- post-activation, output of forward propagation
    Y -- "true" labels vector, same shape as a3

    Returns:
    cost_total -- value of the cost function without dividing by number of training examples

    Note:
    This is used with mini-batches,
    so we'll first accumulate costs over an entire epoch
    and then divide by the m training examples.
    """
    # Cross-entropy terms, summed but deliberately NOT averaged (see Note).
    logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)
    cost_total = np.sum(logprobs)

    return cost_total


def forward_propagation(X, parameters):
    """
    Implements the forward propagation: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"

    Returns:
    a3 -- the sigmoid output of the last activation
    cache -- tuple of every intermediate value, consumed by backward_propagation()
    """
    # retrieve parameters
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    z1 = np.dot(W1, X) + b1
    a1 = relu(z1)
    z2 = np.dot(W2, a1) + b2
    a2 = relu(z2)
    z3 = np.dot(W3, a2) + b3
    a3 = sigmoid(z3)

    cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)

    return a3, cache


def backward_propagation(X, Y, cache):
    """
    Implement the backward propagation for the 3-layer network above.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
    cache -- cache output from forward_propagation()

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter,
                 activation and pre-activation variables
    """
    m = X.shape[1]
    (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache

    # The 1/m factor is applied once here and propagates to every gradient below.
    dz3 = 1. / m * (a3 - Y)
    dW3 = np.dot(dz3, a2.T)
    db3 = np.sum(dz3, axis=1, keepdims=True)

    da2 = np.dot(W3.T, dz3)
    dz2 = np.multiply(da2, np.int64(a2 > 0))  # relu'(z2) == 1 where a2 > 0
    dW2 = np.dot(dz2, a1.T)
    db2 = np.sum(dz2, axis=1, keepdims=True)

    da1 = np.dot(W2.T, dz2)
    dz1 = np.multiply(da1, np.int64(a1 > 0))  # relu'(z1) == 1 where a1 > 0
    dW1 = np.dot(dz1, X.T)
    db1 = np.sum(dz1, axis=1, keepdims=True)

    gradients = {"dz3": dz3, "dW3": dW3, "db3": db3,
                 "da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
                 "da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}

    return gradients


def predict(X, y, parameters):
    """
    This function is used to predict the results of a n-layer neural network.

    Arguments:
    X -- data set of examples you would like to label
    y -- true labels, shape (1, number of examples)
    parameters -- parameters of the trained model

    Returns:
    p -- predictions for the given dataset X
    """
    m = X.shape[1]
    # BUGFIX: dtype was np.int, an alias removed in NumPy 1.20+; builtin int
    # is the documented replacement and gives the same platform integer dtype.
    p = np.zeros((1, m), dtype=int)

    # Forward propagation
    a3, caches = forward_propagation(X, parameters)

    # convert probas to 0/1 predictions
    for i in range(0, a3.shape[1]):
        if a3[0, i] > 0.5:
            p[0, i] = 1
        else:
            p[0, i] = 0

    # print results

    #print ("predictions: " + str(p[0,:]))
    #print ("true labels: " + str(y[0,:]))
    print("Accuracy: " + str(np.mean((p[0, :] == y[0, :]))))

    return p


def load_2D_dataset():
    """
    Load the 2D dataset from datasets/data.mat and scatter-plot the training set.

    Returns:
    train_X, train_Y, test_X, test_Y -- transposed arrays from the .mat file's
    'X'/'y' (train) and 'Xval'/'yval' (test) entries.
    """
    data = scipy.io.loadmat('datasets/data.mat')
    train_X = data['X'].T
    train_Y = data['y'].T
    test_X = data['Xval'].T
    test_Y = data['yval'].T

    plt.scatter(train_X[0, :], train_X[1, :], c=train_Y, s=40, cmap=plt.cm.Spectral);

    return train_X, train_Y, test_X, test_Y


def plot_decision_boundary(model, X, y):
    """
    Plot the decision boundary of `model` over the 2D feature plane plus the data.

    Arguments:
    model -- callable mapping an (n_points, 2) array to per-point predictions
    X -- data of shape (2, number of examples)
    y -- labels used to color the scatter points
    """
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
    plt.show()


def predict_dec(parameters, X):
    """
    Used for plotting decision boundary.

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (m, K)

    Returns:
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Predict using forward propagation and a classification threshold of 0.5
    a3, cache = forward_propagation(X, parameters)
    predictions = (a3 > 0.5)
    return predictions


def load_dataset():
    """
    Generate and scatter-plot the two-moons toy dataset (fixed seed for reproducibility).

    Returns:
    train_X -- features of shape (2, 300)
    train_Y -- labels of shape (1, 300)
    """
    np.random.seed(3)
    train_X, train_Y = sklearn.datasets.make_moons(n_samples=300, noise=.2)  # 300 #0.2
    # Visualize the data
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);
    train_X = train_X.T
    train_Y = train_Y.reshape((1, train_Y.shape[0]))

    return train_X, train_Y