CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
amanchadha

Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.

GitHub Repository: amanchadha/coursera-deep-learning-specialization
Path: blob/master/C4 - Convolutional Neural Networks/Week 1/public_tests.py
Views: 4802
1
import numpy as np
2
from test_utils import single_test, multiple_test
3
4
5
def zero_pad_test(target):
    """Test `target` (zero_pad) against a reference zero-padded array.

    Builds a fixed random input and checks that `target(x, pad)` returns
    an array of the right dtype and values via `single_test`.
    """
    np.random.seed(1)
    x = np.random.randn(4, 3, 3, 2)
    pad = 2
    # Reference: x zero-padded by `pad` on the two spatial axes (H, W) only;
    # the batch and channel axes are untouched. The previous hand-typed
    # literal was this exact array (truncated to 8 decimals) and was
    # assigned twice (`expected_output = expected_output = ...`).
    expected_output = np.pad(
        x,
        ((0, 0), (pad, pad), (pad, pad), (0, 0)),
        mode="constant",
        constant_values=0,
    )

    test_cases = [
        {
            "name": "datatype_check",
            "input": [x, pad],
            "expected": expected_output,
            "error": "Datatype mismatch."
        },
        {
            "name": "equation_output_check",
            "input": [x, pad],
            "expected": expected_output,
            "error": "Wrong output"
        }
    ]

    single_test(test_cases, target)
252
253
254
def conv_single_step_test(target):
    """Test `target` (conv_single_step) on a fixed random slice/filter pair.

    Checks dtype, shape and value of the scalar convolution result via
    `multiple_test`.
    """
    np.random.seed(1)
    a_slice_prev = np.random.randn(4, 4, 3)
    W = np.random.randn(4, 4, 3)
    b = np.random.randn(1, 1, 1)
    expected_output = np.float64(-6.999089450680221)

    # Same inputs/expected value for each check; only name and message vary.
    checks = [
        ("datatype_check", "Datatype mismatch"),
        ("shape_check", "Wrong shape"),
        ("equation_output_check", "Wrong output"),
    ]
    test_cases = [
        {
            "name": name,
            "input": [a_slice_prev, W, b],
            "expected": expected_output,
            "error": message,
        }
        for name, message in checks
    ]

    multiple_test(test_cases, target)
283
284
285
def conv_forward_test(target):
    """Test `target` (conv_forward) for output shape and exact values.

    First runs two unseeded shape-only checks with different pad/stride
    settings, then a seeded run compared against a reference output via
    `multiple_test`.
    """
    # Shape-only checks: values are irrelevant, so no seed is needed.
    # (A dead `hparameters = {"pad": 1, "stride": 2}` assignment was removed
    # here; both calls below pass their hyperparameters inline.)
    A_prev = np.random.randn(2, 5, 7, 4)
    W = np.random.randn(3, 3, 4, 8)
    b = np.random.randn(1, 1, 1, 8)
    # pad=3, stride=1: n_H = 5 + 2*3 - 3 + 1 = 9, n_W = 7 + 2*3 - 3 + 1 = 11.
    Z, cache_conv = target(A_prev, W, b, {"pad" : 3, "stride": 1})
    Z_shape = Z.shape
    assert Z_shape[0] == A_prev.shape[0], f"m is wrong. Current: {Z_shape[0]}. Expected: {A_prev.shape[0]}"
    assert Z_shape[1] == 9, f"n_H is wrong. Current: {Z_shape[1]}. Expected: 9"
    assert Z_shape[2] == 11, f"n_W is wrong. Current: {Z_shape[2]}. Expected: 11"
    assert Z_shape[3] == W.shape[3], f"n_C is wrong. Current: {Z_shape[3]}. Expected: {W.shape[3]}"

    # Different pad/stride to catch hard-coded hyperparameters.
    Z, cache_conv = target(A_prev, W, b, {"pad" : 0, "stride": 2})
    assert(Z.shape == (2, 2, 3, 8)), "Wrong shape. Don't hard code the pad and stride values in the function"

    # Value checks: seeded inputs with a precomputed reference output.
    np.random.seed(1)
    A_prev = np.random.randn(2, 5, 7, 4)
    W = np.random.randn(3, 3, 4, 8)
    b = np.random.randn(1, 1, 1, 8)
    hparameters = {"pad": 1,
                   "stride": 2}
    expected_Z = np.array([[[[-2.65112363, -0.37849177, -1.97054929, -1.96235299,
                              -1.72259872, 0.4676693, -6.43434016, 1.10764994],
                             [4.67692928, 4.29865415, -1.3608031, 0.80532859,
                              -2.88480108, 8.95280034, 5.32627807, -1.82635258],
                             [-2.05881174, 3.40859795, 0.3502282, 0.68303626,
                              -1.88328065, -1.87480174, 5.8008721, 0.0700918],
                             [-3.50141791, 2.704286, 0.28341346, 4.15637411,
                              -0.46575834, -0.43668824, -5.56866106, 1.72288033]],

                            [[-2.32126108, 0.91040602, 2.31852532, 0.98842271,
                              3.31716611, 4.05638832, -2.48135123, 0.95872443],
                             [6.03978907, -6.96477888, -1.20799344, 2.68913374,
                              -4.35744033, 10.59355329, 3.20856901, 13.98735978],
                             [-3.01280755, -2.90226517, -8.34171936, -5.26220853,
                              5.6630696, 1.08704033, 2.20430705, -10.73218294],
                             [-6.24198266, -0.53158832, -3.29654954, -1.81865997,
                              0.59196322, 2.51134745, -4.24924673, 5.21936641]],

                            [[-2.22187412, -0.95259173, -5.99441273, 0.79147932,
                              1.16919278, -0.17321161, -3.26346299, -3.62407578],
                             [-2.17796037, 8.07171329, -0.5772704, 3.36286738,
                              4.48113645, -2.89198428, 10.99288867, 3.03171932],
                             [-12.49991261, 5.26845833, -1.67648614, -8.65695762,
                              -10.68157258, 6.71492428, 2.83839971, 4.47259772],
                             [0.11421092, -1.90872424, -3.28117601, 0.89922467,
                              0.83985348, -0.25127044, -0.94409718, 5.17244412]]],

                           [[[1.97649814, 2.76743075, -6.39611007, 2.95378171,
                              -0.81235239, -0.53333631, 0.71268871, 4.91385105],
                             [-5.14401869, 6.97041391, -4.53976469, 5.89092653,
                              -5.74606931, 2.74256558, 3.02124802, -10.04187592],
                             [5.53871187, -8.55886701, -4.70962135, 2.55966738,
                              -2.66959504, 5.60010695, -8.37253342, 4.18848278],
                             [0.63364517, -3.71848223, -3.67072772, 4.34226476,
                              -1.21894465, 3.68929452, 5.89166305, 0.94256457]],

                            [[2.36049402, -3.09696204, 8.33521755, 3.04680748,
                              3.7964542, 0.66488788, 1.9935476, 1.54396221],
                             [-7.73457048, 0.287562, 7.97481218, 3.32415996,
                              -4.07121488, 2.69182963, 4.1356109, -5.16178423],
                             [-6.95635186, -0.10924121, -4.12526441, 0.62578199,
                              4.69492086, -3.52748877, 3.63168271, 0.64007629],
                             [7.94980014, 5.71855659, 3.49970333, 12.7718152,
                              8.84959478, 2.37150319, -1.42531648, -0.51126641]],

                            [[-5.29658283, -4.20466999, -6.63067766, -9.87831724,
                              -5.32130395, 7.32417919, 2.96011091, 7.60669481],
                             [11.54630784, -1.93157244, 2.26699242, 7.62184275,
                              5.40584348, -2.88837958, -1.46981877, 7.91314719],
                             [5.94067877, 3.50739649, 0.82512202, 4.80655489,
                              -4.1044945, 4.14358541, 0.13194885, 4.35397285],
                             [4.91298364, -1.44499772, 5.9392078, -3.92690408,
                              2.12840309, 1.27237402, 1.56992581, 0.44270565]]]])
    expected_cache = (A_prev, W, b, hparameters)
    expected_output = (expected_Z, expected_cache)
    test_cases = [
        {
            "name": "datatype_check",
            "input": [A_prev, W, b, hparameters],
            "expected": expected_output,
            "error": "Datatype mismatch"
        },
        {
            "name": "shape_check",
            "input": [A_prev, W, b, hparameters],
            "expected": expected_output,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [A_prev, W, b, hparameters],
            "expected": expected_output,
            "error": "Wrong output"
        }
    ]

    multiple_test(test_cases, target)
385
386
387
def pool_forward_test(target):
    """Test `target` (pool_forward) in both "max" and "average" modes.

    First an unseeded shape-only check, then seeded value checks against
    precomputed references, run via `multiple_test`.
    """
    # Shape-only check (average mode); the values themselves don't matter.
    A_prev = np.random.randn(2, 5, 7, 3)
    A, cache = target(A_prev, {"stride" : 2, "f": 2}, mode = "average")
    out_shape = A.shape
    assert out_shape[0] == A_prev.shape[0], f"m is wrong. Current: {out_shape[0]}. Expected: {A_prev.shape[0]}"
    assert out_shape[1] == 2, f"n_H is wrong. Current: {out_shape[1]}. Expected: 2"
    assert out_shape[2] == 3, f"n_W is wrong. Current: {out_shape[2]}. Expected: 3"
    assert out_shape[3] == A_prev.shape[3], f"n_C is wrong. Current: {out_shape[3]}. Expected: {A_prev.shape[3]}"

    # Value checks: seeded input and reference outputs for both modes.
    np.random.seed(1)
    A_prev = np.random.randn(2, 5, 5, 3)
    hparameters = {"stride": 1, "f": 3}
    expected_cache = (A_prev, hparameters)

    expected_A_max = np.array([[[[1.74481176, 0.90159072, 1.65980218],
                                 [1.74481176, 1.46210794, 1.65980218],
                                 [1.74481176, 1.6924546, 1.65980218]],

                                [[1.14472371, 0.90159072, 2.10025514],
                                 [1.14472371, 0.90159072, 1.65980218],
                                 [1.14472371, 1.6924546, 1.65980218]],

                                [[1.13162939, 1.51981682, 2.18557541],
                                 [1.13162939, 1.51981682, 2.18557541],
                                 [1.13162939, 1.6924546, 2.18557541]]],

                               [[[1.19891788, 0.84616065, 0.82797464],
                                 [0.69803203, 0.84616065, 1.2245077],
                                 [0.69803203, 1.12141771, 1.2245077]],

                                [[1.96710175, 0.84616065, 1.27375593],
                                 [1.96710175, 0.84616065, 1.23616403],
                                 [1.62765075, 1.12141771, 1.2245077]],

                                [[1.96710175, 0.86888616, 1.27375593],
                                 [1.96710175, 0.86888616, 1.23616403],
                                 [1.62765075, 1.12141771, 0.79280687]]]])

    expected_output_max = (expected_A_max, expected_cache)

    expected_A_average = np.array([[[[-3.01046719e-02, -3.24021315e-03, -3.36298859e-01],
                                     [1.43310483e-01, 1.93146751e-01, -4.44905196e-01],
                                     [1.28934436e-01, 2.22428468e-01, 1.25067597e-01]],

                                    [[-3.81801899e-01, 1.59993515e-02, 1.70562706e-01],
                                     [4.73707165e-02, 2.59244658e-02, 9.20338402e-02],
                                     [3.97048605e-02, 1.57189094e-01, 3.45302489e-01]],

                                    [[-3.82680519e-01, 2.32579951e-01, 6.25997903e-01],
                                     [-2.47157416e-01, -3.48524998e-04, 3.50539717e-01],
                                     [-9.52551510e-02, 2.68511000e-01, 4.66056368e-01]]],

                                   [[[-1.73134159e-01, 3.23771981e-01, -3.43175716e-01],
                                     [3.80634669e-02, 7.26706274e-02, -2.30268958e-01],
                                     [2.03009393e-02, 1.41414785e-01, -1.23158476e-02]],

                                    [[4.44976963e-01, -2.61694592e-03, -3.10403073e-01],
                                     [5.08114737e-01, -2.34937338e-01, -2.39611830e-01],
                                     [1.18726772e-01, 1.72552294e-01, -2.21121966e-01]],

                                    [[4.29449255e-01, 8.44699612e-02, -2.72909051e-01],
                                     [6.76351685e-01, -1.20138225e-01, -2.44076712e-01],
                                     [1.50774518e-01, 2.89111751e-01, 1.23238536e-03]]]])
    expected_output_average = (expected_A_average, expected_cache)

    # Three checks per mode; only the mode, reference and label differ.
    test_cases = []
    for mode, expected, label in (("max", expected_output_max, "MAX-Pool"),
                                  ("average", expected_output_average, "AVG-Pool")):
        test_cases.append({
            "name": "datatype_check",
            "input": [A_prev, hparameters, mode],
            "expected": expected,
            "error": f"Datatype mismatch in {label}",
        })
        test_cases.append({
            "name": "shape_check",
            "input": [A_prev, hparameters, mode],
            "expected": expected,
            "error": f"Wrong shape in {label}",
        })
        test_cases.append({
            "name": "equation_output_check",
            "input": [A_prev, hparameters, mode],
            "expected": expected,
            "error": f"Wrong output in {label}",
        })

    multiple_test(test_cases, target)
500
501
######################################
502
############## UNGRADED ##############
503
######################################
504
505
506
def conv_backward_test(target):
    """(Ungraded) Intended to test conv_backward via `multiple_test`.

    NOTE(review): broken stub — `parameters`, `cache`, `X`, `Y` and
    `expected_output` are not defined anywhere in this module, so calling
    this function raises NameError. Left as-is pending real fixtures.
    """
    test_cases = [
        {
            "name": "datatype_check",
            "input": [parameters, cache, X, Y],
            "expected": expected_output,
            "error":"The function should return a numpy array."
        },
        {
            "name": "shape_check",
            "input": [parameters, cache, X, Y],
            "expected": expected_output,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [parameters, cache, X, Y],
            "expected": expected_output,
            "error": "Wrong output"
        }
    ]

    multiple_test(test_cases, target)
530
531
532
def create_mask_from_window_test(target):
    """(Ungraded) Intended to test create_mask_from_window via `multiple_test`.

    NOTE(review): broken stub — `parameters`, `grads` and `expected_output`
    are not defined anywhere in this module, so calling this function raises
    NameError. Left as-is pending real fixtures.
    """
    test_cases = [
        {
            "name": "datatype_check",
            "input": [parameters, grads],
            "expected": expected_output,
            "error":"Data type mismatch"
        },
        {
            "name": "shape_check",
            "input": [parameters, grads],
            "expected": expected_output,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [parameters, grads],
            "expected": expected_output,
            "error": "Wrong output"
        }
    ]

    multiple_test(test_cases, target)
556
557
558
def distribute_value_test(target):
    """(Ungraded) Intended to test distribute_value via `multiple_test`.

    NOTE(review): broken stub — `X`, `Y`, `n_h` and `expected_output` are
    not defined anywhere in this module, so calling this function raises
    NameError. Left as-is pending real fixtures.
    """
    test_cases = [
        {
            "name": "datatype_check",
            "input": [X, Y, n_h],
            "expected": expected_output,
            "error":"Data type mismatch"
        },
        {
            "name": "shape_check",
            "input": [X, Y, n_h],
            "expected": expected_output,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [X, Y, n_h],
            "expected": expected_output,
            "error": "Wrong output"
        }
    ]

    multiple_test(test_cases, target)
581
582
583
def pool_backward_test(target):
    """(Ungraded) Intended to test pool_backward via `single_test`.

    NOTE(review): broken stub — `parameters`, `X` and `expected_output` are
    not defined anywhere in this module, so calling this function raises
    NameError. Left as-is pending real fixtures.
    """
    test_cases = [
        {
            "name": "datatype_check",
            "input": [parameters, X],
            "expected": expected_output,
            "error":"Data type mismatch"
        },
        {
            "name": "shape_check",
            "input": [parameters, X],
            "expected": expected_output,
            "error": "Wrong shape"
        },
        {
            "name": "equation_output_check",
            "input": [parameters, X],
            "expected": expected_output,
            "error": "Wrong output"
        }
    ]

    single_test(test_cases, target)
607
608