GitHub Repository: amanchadha/coursera-deep-learning-specialization
Path: blob/master/C4 - Convolutional Neural Networks/Week 4/Face Recognition/fr_utils.py
#### PARTS OF THIS CODE ARE ADAPTED FROM VICTOR SY WANG: https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/utils.py ####

import tensorflow as tf
import numpy as np
import os
#import cv2
from numpy import genfromtxt
# Note: the Keras concatenate layer is deliberately not imported here,
# because this module defines its own concatenate() helper below.
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Activation, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
import h5py
import matplotlib.pyplot as plt
import PIL
from tensorflow.keras import backend as K

K.set_image_data_format('channels_first')

_FLOATX = 'float32'

def variable(value, dtype=_FLOATX, name=None):
    # In TF2, variables are initialized eagerly on creation, so the original
    # _get_session().run(v.initializer) call (a TF1 helper that was never
    # defined or imported in this file) is unnecessary and has been dropped.
    return tf.Variable(np.asarray(value, dtype=dtype), name=name)

def shape(x):
    return x.get_shape()

def square(x):
    return tf.square(x)

def zeros(shape, dtype=_FLOATX, name=None):
    return variable(np.zeros(shape), dtype, name)

def concatenate(tensors, axis=-1):
    if axis < 0:
        axis = axis % len(tensors[0].get_shape())
    # tf.concat takes the values first and the axis second; the original
    # tf.concat(axis, tensors) used the long-removed pre-TF-1.0 signature.
    return tf.concat(tensors, axis)

def LRN2D(x):
    return tf.nn.lrn(x, alpha=1e-4, beta=0.75)
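
def _example_lrn2d():
    # Hedged usage sketch: LRN2D is applied inside a Keras model by wrapping
    # it in a Lambda layer. The layer name and input shape below are
    # illustrative assumptions, not values taken from this file.
    from tensorflow.keras.layers import Lambda
    inp = Input(shape=(64, 24, 24))  # channels_first: (C, H, W)
    out = Lambda(LRN2D, name='lrn_1')(inp)
    return Model(inputs=inp, outputs=out)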

def conv2d_bn(x,
              layer=None,
              cv1_out=None,
              cv1_filter=(1, 1),
              cv1_strides=(1, 1),
              cv2_out=None,
              cv2_filter=(3, 3),
              cv2_strides=(1, 1),
              padding=None):
    # First conv + batch norm + ReLU. The name suffix distinguishes the
    # single-conv case ('') from the two-conv case ('1' then '2').
    num = '' if cv2_out is None else '1'
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format='channels_first', name=layer+'_conv'+num)(x)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
    if cv2_out is None:
        return tensor
    # Optional second conv + batch norm + ReLU
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format='channels_first', name=layer+'_conv'+'2')(tensor)
    tensor = BatchNormalization(axis=1, epsilon=0.00001, name=layer+'_bn'+'2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
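
def _example_conv2d_bn():
    # Hedged usage sketch: how conv2d_bn is typically called when assembling
    # an inception branch. The input shape and layer name are illustrative
    # assumptions, not values taken from this file.
    inp = Input(shape=(3, 96, 96))  # channels_first: (C, H, W)
    branch = conv2d_bn(inp,
                       layer='inception_3a_3x3',
                       cv1_out=96, cv1_filter=(1, 1),
                       cv2_out=128, cv2_filter=(3, 3), cv2_strides=(1, 1),
                       padding=(1, 1))
    # -> Conv2D(1x1) + BN + ReLU, zero-pad, Conv2D(3x3) + BN + ReLU
    return Model(inputs=inp, outputs=branch)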

WEIGHTS = [
    'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
    'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
    'inception_3a_pool_conv', 'inception_3a_pool_bn',
    'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
    'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
    'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
    'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
    'inception_3b_pool_conv', 'inception_3b_pool_bn',
    'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
    'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
    'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
    'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
    'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
    'inception_4a_pool_conv', 'inception_4a_pool_bn',
    'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
    'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
    'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
    'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
    'inception_5a_pool_conv', 'inception_5a_pool_bn',
    'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
    'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
    'inception_5b_pool_conv', 'inception_5b_pool_bn',
    'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
    'dense_layer'
]

conv_shape = {
    'conv1': [64, 3, 7, 7],
    'conv2': [64, 64, 1, 1],
    'conv3': [192, 64, 3, 3],
    'inception_3a_1x1_conv': [64, 192, 1, 1],
    'inception_3a_pool_conv': [32, 192, 1, 1],
    'inception_3a_5x5_conv1': [16, 192, 1, 1],
    'inception_3a_5x5_conv2': [32, 16, 5, 5],
    'inception_3a_3x3_conv1': [96, 192, 1, 1],
    'inception_3a_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_3x3_conv1': [96, 256, 1, 1],
    'inception_3b_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_5x5_conv1': [32, 256, 1, 1],
    'inception_3b_5x5_conv2': [64, 32, 5, 5],
    'inception_3b_pool_conv': [64, 256, 1, 1],
    'inception_3b_1x1_conv': [64, 256, 1, 1],
    'inception_3c_3x3_conv1': [128, 320, 1, 1],
    'inception_3c_3x3_conv2': [256, 128, 3, 3],
    'inception_3c_5x5_conv1': [32, 320, 1, 1],
    'inception_3c_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_3x3_conv1': [96, 640, 1, 1],
    'inception_4a_3x3_conv2': [192, 96, 3, 3],
    'inception_4a_5x5_conv1': [32, 640, 1, 1],
    'inception_4a_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_pool_conv': [128, 640, 1, 1],
    'inception_4a_1x1_conv': [256, 640, 1, 1],
    'inception_4e_3x3_conv1': [160, 640, 1, 1],
    'inception_4e_3x3_conv2': [256, 160, 3, 3],
    'inception_4e_5x5_conv1': [64, 640, 1, 1],
    'inception_4e_5x5_conv2': [128, 64, 5, 5],
    'inception_5a_3x3_conv1': [96, 1024, 1, 1],
    'inception_5a_3x3_conv2': [384, 96, 3, 3],
    'inception_5a_pool_conv': [96, 1024, 1, 1],
    'inception_5a_1x1_conv': [256, 1024, 1, 1],
    'inception_5b_3x3_conv1': [96, 736, 1, 1],
    'inception_5b_3x3_conv2': [384, 96, 3, 3],
    'inception_5b_pool_conv': [96, 736, 1, 1],
    'inception_5b_1x1_conv': [256, 736, 1, 1],
}
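
# Note: each conv_shape entry follows the Torch export's layout of
# (out_channels, in_channels, kernel_h, kernel_w); load_weights() below
# transposes these to Keras' (h, w, in_channels, out_channels) order.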

def load_weights_from_FaceNet(FRmodel):
    # Load weights from csv files (which were exported from the OpenFace torch model)
    weights_dict = load_weights()

    # Set layer weights of the model. Keras' get_layer raises ValueError for
    # a missing layer (it never returns None), so guard with try/except; the
    # original fallback to an undefined `model` variable has been removed.
    for name in WEIGHTS:
        try:
            layer = FRmodel.get_layer(name)
        except ValueError:
            continue
        layer.set_weights(weights_dict[name])
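
def _example_load_weights_from_FaceNet():
    # Hedged usage sketch: assumes a companion model builder such as
    # faceRecoModel from the assignment's inception_blocks_v2.py; that import
    # and the (3, 96, 96) input shape are assumptions, not defined in this file.
    from inception_blocks_v2 import faceRecoModel
    FRmodel = faceRecoModel(input_shape=(3, 96, 96))
    load_weights_from_FaceNet(FRmodel)
    return FRmodel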

def load_weights():
    # Set weights path
    dirPath = './weights'
    fileNames = filter(lambda f: not f.startswith('.'), os.listdir(dirPath))
    paths = {}
    weights_dict = {}

    for n in fileNames:
        paths[n.replace('.csv', '')] = dirPath + '/' + n

    for name in WEIGHTS:
        if 'conv' in name:
            # Conv weights arrive flattened in Torch's (out, in, h, w) layout;
            # reshape, then transpose to Keras' (h, w, in, out) order.
            conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            conv_w = np.reshape(conv_w, conv_shape[name])
            conv_w = np.transpose(conv_w, (2, 3, 1, 0))
            conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            weights_dict[name] = [conv_w, conv_b]
        elif 'bn' in name:
            # Batch norm: gamma (w), beta (b), moving mean (m), moving variance (v)
            bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)
            bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)
            weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]
        elif 'dense' in name:
            # Final dense layer mapping 736 features to the 128-d embedding
            dense_w = genfromtxt(dirPath + '/dense_w.csv', delimiter=',', dtype=None)
            dense_w = np.reshape(dense_w, (128, 736))
            dense_w = np.transpose(dense_w, (1, 0))
            dense_b = genfromtxt(dirPath + '/dense_b.csv', delimiter=',', dtype=None)
            weights_dict[name] = [dense_w, dense_b]

    return weights_dict

def load_dataset():
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
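
def _example_load_dataset():
    # Hedged usage sketch: the usual normalization for these Coursera h5
    # datasets. Scaling by 255 and transposing the labels are conventional
    # follow-up steps, not something this file performs itself.
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
    X_train = X_train_orig / 255.  # scale pixels to [0, 1]
    Y_train = Y_train_orig.T       # shape (m, 1)
    return X_train, Y_train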

def img_to_encoding(image_path, model):
    # The image on disk is expected to already match the model's input
    # resolution, since no target_size is requested; load_img returns a
    # PIL image, converted to an array before transposing.
    img = tf.keras.preprocessing.image.load_img(image_path)
    img = np.asarray(img)
    # Scale to [0, 1] and move channels first: (H, W, C) -> (C, H, W)
    img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12)
    x_train = np.expand_dims(img, axis=0)  # add the batch dimension
    embedding = model.predict_on_batch(x_train)
    return embedding
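
def _example_verify(image_path, identity, database, model, threshold=0.7):
    # Hedged usage sketch of the verification step this encoder supports:
    # compare an image's embedding to a stored one by L2 distance. The
    # database dict, identity key, and 0.7 threshold are illustrative
    # assumptions (0.7 matches the Coursera assignment's choice).
    encoding = img_to_encoding(image_path, model)
    dist = np.linalg.norm(encoding - database[identity])
    return dist, dist < threshold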