CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
y33-j3T

Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.

GitHub Repository: y33-j3T/Coursera-Deep-Learning
Path: blob/master/Improving Deep Neural Networks Hyperparameter tuning, Regularization and Optimization/week6/opt_utils_v1a.py
Views: 13370
1
import numpy as np
2
import matplotlib.pyplot as plt
3
import h5py
4
import scipy.io
5
import sklearn
6
import sklearn.datasets
7
8
def sigmoid(x):
    """
    Apply the logistic sigmoid element-wise.

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(x), same shape as x
    """
    return 1.0 / (1.0 + np.exp(-x))
20
21
def relu(x):
    """
    Apply the rectified linear unit element-wise: max(0, x).

    Arguments:
    x -- A scalar or numpy array of any size.

    Return:
    s -- relu(x), same shape as x
    """
    return np.maximum(x, 0)
34
35
def load_params_and_grads(seed=1):
    """
    Draw a fixed-size set of random parameters and gradients for testing
    optimization routines.

    Arguments:
    seed -- integer seed for numpy's RNG, so results are reproducible

    Returns:
    W1, b1, W2, b2 -- random "parameters" of shapes (2,3), (2,1), (3,3), (3,1)
    dW1, db1, dW2, db2 -- random "gradients" with the matching shapes
    """
    np.random.seed(seed)

    # NOTE: the draw order (parameters first, then gradients, in this exact
    # shape sequence) is significant for reproducibility at a given seed.
    shapes = [(2, 3), (2, 1), (3, 3), (3, 1)]
    W1, b1, W2, b2 = (np.random.randn(*shape) for shape in shapes)
    dW1, db1, dW2, db2 = (np.random.randn(*shape) for shape in shapes)

    return W1, b1, W2, b2, dW1, db1, dW2, db2
48
49
50
def initialize_parameters(layer_dims):
    """
    Initialize network weights with He initialization and biases with zeros.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in our network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)

    Tips:
    - For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1].
      This means W1's shape was (2,2) and b1 was (2,1). Now you have to generalize it!
    - In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
    """
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)  # number of layers in the network

    for l in range(1, L):
        # He initialization: scale by sqrt(2 / fan_in), suited to ReLU layers.
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * np.sqrt(2 / layer_dims[l-1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))

        # Bug fix: the original asserted W's first dimension twice (the trailing
        # ", layer_dims[l-1]" / ", 1" were assert *messages*, not checks) and
        # never validated b at all. Check both full shapes.
        assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])
        assert parameters['b' + str(l)].shape == (layer_dims[l], 1)

    return parameters
80
81
82
def compute_cost(a3, Y):
    """
    Total (un-averaged) cross-entropy cost of the predictions.

    Arguments:
    a3 -- post-activation, output of forward propagation
    Y -- "true" labels vector, same shape as a3

    Returns:
    cost_total -- summed cross-entropy, NOT divided by the number of examples

    Note:
    This is used with mini-batches, so costs are accumulated over an entire
    epoch first and only then divided by the m training examples.
    """
    # Element-wise binary cross-entropy, summed over every entry.
    per_example = -(Y * np.log(a3) + (1 - Y) * np.log(1 - a3))
    return np.sum(per_example)
104
105
def forward_propagation(X, parameters):
    """
    Run the forward pass of the 3-layer network:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters
                  "W1", "b1", "W2", "b2", "W3", "b3"

    Returns:
    a3 -- output of the final sigmoid activation
    cache -- tuple (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) for backprop
    """
    # retrieve parameters
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # First hidden layer
    z1 = np.dot(W1, X) + b1
    a1 = relu(z1)

    # Second hidden layer
    z2 = np.dot(W2, a1) + b2
    a2 = relu(z2)

    # Output layer
    z3 = np.dot(W3, a2) + b3
    a3 = sigmoid(z3)

    # Everything backprop needs, in the order backward_propagation unpacks it.
    cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)
    return a3, cache
142
143
def backward_propagation(X, Y, cache):
    """
    Backward pass for the 3-layer LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID
    network, producing gradients already averaged over the batch.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
    cache -- cache output from forward_propagation()

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter,
                 activation and pre-activation variables
    """
    m = X.shape[1]
    (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache

    gradients = {}

    # Output layer: combined sigmoid + cross-entropy derivative, averaged by m.
    dz3 = 1./m * (a3 - Y)
    gradients["dz3"] = dz3
    gradients["dW3"] = np.dot(dz3, a2.T)
    gradients["db3"] = np.sum(dz3, axis=1, keepdims=True)

    # Second hidden layer: ReLU passes gradient only where the activation > 0.
    da2 = np.dot(W3.T, dz3)
    dz2 = da2 * (a2 > 0).astype(np.int64)
    gradients["da2"] = da2
    gradients["dz2"] = dz2
    gradients["dW2"] = np.dot(dz2, a1.T)
    gradients["db2"] = np.sum(dz2, axis=1, keepdims=True)

    # First hidden layer: same ReLU masking against a1.
    da1 = np.dot(W2.T, dz2)
    dz1 = da1 * (a1 > 0).astype(np.int64)
    gradients["da1"] = da1
    gradients["dz1"] = dz1
    gradients["dW1"] = np.dot(dz1, X.T)
    gradients["db1"] = np.sum(dz1, axis=1, keepdims=True)

    return gradients
177
178
def predict(X, y, parameters):
    """
    This function is used to predict the results of a n-layer neural network.

    Arguments:
    X -- data set of examples you would like to label, shape (input size, m)
    y -- true labels, shape (1, m); used only for the printed accuracy
    parameters -- parameters of the trained model

    Returns:
    p -- predictions for the given dataset X, shape (1, m), dtype int
    """
    m = X.shape[1]

    # Forward propagation
    a3, caches = forward_propagation(X, parameters)

    # Bug fix: np.int was removed in NumPy 1.24, so the original
    # np.zeros((1, m), dtype=np.int) crashes on modern NumPy. The per-column
    # thresholding loop is also replaced by a vectorized comparison.
    p = (a3 > 0.5).astype(int).reshape(1, m)

    # print results
    print("Accuracy: " + str(np.mean((p[0,:] == y[0,:]))))

    return p
210
211
def load_2D_dataset():
    """
    Load the 2-D dataset stored in datasets/data.mat and scatter-plot the
    training points.

    Returns:
    train_X, train_Y -- training data/labels, transposed to (features, examples)
    test_X, test_Y -- validation data/labels ('Xval'/'yval'), same layout
    """
    data = scipy.io.loadmat('datasets/data.mat')

    train_X, train_Y = data['X'].T, data['y'].T
    test_X, test_Y = data['Xval'].T, data['yval'].T

    # Visualize the training set
    plt.scatter(train_X[0, :], train_X[1, :], c=train_Y, s=40, cmap=plt.cm.Spectral)

    return train_X, train_Y, test_X, test_Y
221
222
def plot_decision_boundary(model, X, y):
    """
    Draw the decision regions of `model` over the plane spanned by the data,
    together with the data points themselves.

    Arguments:
    model -- callable mapping an (n_points, 2) array of coordinates to labels
    X -- data of shape (2, m)
    y -- labels used to color the scattered points
    """
    # Bounding box of the data with one unit of padding on every side.
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1

    # Evaluate the model on a dense grid with spacing h covering the box.
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = model(grid_points).reshape(xx.shape)

    # Filled contours for the decision regions, scatter for the samples.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
    plt.show()
238
239
def predict_dec(parameters, X):
    """
    Used for plotting decision boundary.

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (m, K)

    Returns
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    # Forward pass, then threshold the output probability at 0.5.
    probabilities, _ = forward_propagation(X, parameters)
    return probabilities > 0.5
255
256
def load_dataset():
    """
    Generate and plot the two-moons toy dataset (300 points, noise 0.2).

    Returns:
    train_X -- coordinates, transposed to shape (2, 300)
    train_Y -- labels, reshaped to (1, 300)
    """
    np.random.seed(3)
    train_X, train_Y = sklearn.datasets.make_moons(n_samples=300, noise=.2)  # 300 points, noise 0.2

    # Visualize the data before transposing
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral)

    # Reshape to (features, examples) and (1, examples) for the network.
    return train_X.T, train_Y.reshape((1, train_Y.shape[0]))
265