Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/master/C4 - Convolutional Neural Networks/Week 2/ResNets/public_tests.py
Views: 4818
from termcolor import colored
import tensorflow as tf
from tensorflow.keras.initializers import random_uniform, glorot_uniform, constant, identity
import numpy as np


def identity_block_test(target):
    """Sanity-check a student's ``identity_block`` implementation.

    Parameters
    ----------
    target : callable
        The identity-block function under test. It is invoked as
        ``target(X, f=..., filters=..., initializer=..., training=...)``
        and must return a tensor exposing a ``.numpy()`` method.

    Raises
    ------
    AssertionError
        If the output shape, final ReLU activation, batch-normalization
        behaviour, or exact output values differ from the reference.
    """
    np.random.seed(1)
    # Deterministic input: three (1, 4, 4, 3) constant slabs with values
    # -1, 1 and 3, stacked along the batch axis.
    X1 = np.ones((1, 4, 4, 3)) * -1
    X2 = np.ones((1, 4, 4, 3)) * 1
    X3 = np.ones((1, 4, 4, 3)) * 3

    X = np.concatenate((X1, X2, X3), axis=0).astype(np.float32)

    # Inference mode: batch norm should use its moving statistics.
    A3 = target(X,
                f=2,
                filters=[4, 4, 3],
                initializer=lambda seed=0: constant(value=1),
                training=False)

    A3np = A3.numpy()
    assert tuple(A3np.shape) == (3, 4, 4, 3), "Shapes does not match. This is really weird"
    assert np.all(A3np >= 0), "The ReLu activation at the last layer is missing"
    # Compact fingerprint: average over channels, keep first and last rows.
    resume = A3np[:, (0, -1), :, :].mean(axis=3)

    assert np.floor(resume[1, 0, 0]) == 2 * np.floor(resume[1, 0, 3]), "Check the padding and strides"
    assert np.floor(resume[1, 0, 3]) == np.floor(resume[1, 1, 0]), "Check the padding and strides"
    # NOTE(review): this assertion appeared twice verbatim in the original
    # file; the redundant duplicate has been removed.
    assert np.floor(resume[1, 1, 0]) == 2 * np.floor(resume[1, 1, 3]), "Check the padding and strides"

    # A fractional part > 0.7 is only produced when batch norm rescales the
    # activations; integer-ish values suggest the BN layers are missing.
    assert resume[1, 1, 0] - np.floor(resume[1, 1, 0]) > 0.7, "Looks like the BatchNormalization units are not working"

    assert np.allclose(resume,
                       np.array([[[0.0, 0.0, 0.0, 0.0],
                                  [0.0, 0.0, 0.0, 0.0]],
                                 [[192.71236, 192.71236, 192.71236, 96.85619],
                                  [96.85619, 96.85619, 96.85619, 48.9281]],
                                 [[578.1371, 578.1371, 578.1371, 290.56854],
                                  [290.56854, 290.56854, 290.56854, 146.78427]]]),
                       atol=1e-5), "Wrong values with training=False"

    np.random.seed(1)
    # Training mode: batch norm should use the batch statistics instead.
    A4 = target(X,
                f=3,
                filters=[3, 3, 3],
                initializer=lambda seed=7: constant(value=1),
                training=True)
    A4np = A4.numpy()
    resume = A4np[:, (0, -1), :, :].mean(axis=3)
    assert np.allclose(resume,
                       np.array([[[0., 0., 0., 0.],
                                  [0., 0., 0., 0.]],
                                 [[0.37394285, 0.37394285, 0.37394285, 0.37394285],
                                  [0.37394285, 0.37394285, 0.37394285, 0.37394285]],
                                 [[3.2379014, 4.1394243, 4.1394243, 3.2379014],
                                  [3.2379014, 4.1394243, 4.1394243, 3.2379014]]]),
                       atol=1e-5), "Wrong values with training=True"

    print(colored("All tests passed!", "green"))