Path: blob/master/Neural Networks and Deep Learning/Week 4/Building your Deep Neural Network - Step by Step/dnn_utils_v2.py
import numpy as np


def sigmoid(Z):
    """
    Implements the sigmoid activation in numpy

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- output of sigmoid(Z), same shape as Z
    cache -- returns Z as well, useful during backpropagation
    """

    A = 1 / (1 + np.exp(-Z))
    cache = Z

    return A, cache


def relu(Z):
    """
    Implement the RELU function.

    Arguments:
    Z -- Output of the linear layer, of any shape

    Returns:
    A -- Post-activation parameter, of the same shape as Z
    cache -- returns Z as well, stored for computing the backward pass efficiently
    """

    A = np.maximum(0, Z)

    assert (A.shape == Z.shape)

    cache = Z
    return A, cache


def relu_backward(dA, cache):
    """
    Implement the backward propagation for a single RELU unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' stored during the forward pass, used to compute backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """

    Z = cache
    dZ = np.array(dA, copy=True)  # just converting dZ to a correct object.

    # When Z <= 0, set dZ to 0 as well.
    dZ[Z <= 0] = 0

    assert (dZ.shape == Z.shape)

    return dZ


def sigmoid_backward(dA, cache):
    """
    Implement the backward propagation for a single SIGMOID unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' stored during the forward pass, used to compute backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """

    Z = cache

    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)

    assert (dZ.shape == Z.shape)

    return dZ
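

# --- Usage sketch (not part of the original assignment file) ---
# A minimal illustration of how these helpers pair up in the forward and
# backward passes: the cache returned by sigmoid/relu is the Z that the
# corresponding *_backward function consumes. The layer size (4 units),
# batch size (3 examples), and the random Z / dA below are illustrative
# assumptions, not values from the original notebook.

if __name__ == "__main__":
    np.random.seed(1)

    # Pretend Z is the output of a linear layer: 4 hidden units, 3 examples.
    Z = np.random.randn(4, 3)

    # Forward pass through each activation; the cache simply holds Z.
    A_relu, relu_cache = relu(Z)
    A_sig, sig_cache = sigmoid(Z)

    # Pretend dA is the gradient flowing back from the next layer.
    dA = np.random.randn(4, 3)

    # Backward pass: each helper turns dA into dZ using the cached Z.
    dZ_relu = relu_backward(dA, relu_cache)
    dZ_sig = sigmoid_backward(dA, sig_cache)

    assert dZ_relu.shape == Z.shape and dZ_sig.shape == Z.shape
    print("relu dZ:\n", dZ_relu)
    print("sigmoid dZ:\n", dZ_sig)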