Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/master/Convolutional Neural Networks/week4/Face Recognition/fr_utils.py
Views: 13376
#### PART OF THIS CODE IS USING CODE FROM VICTOR SY WANG: https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/utils.py ####
"""Utility functions for the FaceNet face-recognition exercise.

Provides small Keras-backend-style tensor helpers, the layer/weight name
tables for the OpenFace Inception model, CSV weight loading for that model,
and image-to-embedding helpers.
"""

import tensorflow as tf
import numpy as np
import os
import cv2
from numpy import genfromtxt
from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling2D, AveragePooling2D
import h5py
import matplotlib.pyplot as plt


# Default float type used by the backend-style helpers below.
_FLOATX = 'float32'


def variable(value, dtype=_FLOATX, name=None):
    """Create a TF variable initialized with `value` and run its initializer.

    NOTE(review): `_get_session()` is not defined anywhere in this module;
    calling this function raises NameError unless a session getter is
    injected elsewhere. Kept as-is (apart from documentation) because it
    appears to be an unused remnant of the Keras backend it was copied from.
    """
    v = tf.Variable(np.asarray(value, dtype=dtype), name=name)
    _get_session().run(v.initializer)
    return v


def shape(x):
    """Return the static shape (TensorShape) of tensor `x`."""
    return x.get_shape()


def square(x):
    """Element-wise square of tensor `x`."""
    return tf.square(x)


def zeros(shape, dtype=_FLOATX, name=None):
    """Create an all-zeros variable with the given shape."""
    return variable(np.zeros(shape), dtype, name)


def concatenate(tensors, axis=-1):
    """Concatenate `tensors` along `axis` (negative axes are normalized).

    NOTE(review): this deliberately shadows the `concatenate` imported from
    keras.layers above, and uses the pre-TF-1.0 argument order
    `tf.concat(axis, values)` — it will not work on TF >= 1.0 as written.
    """
    if axis < 0:
        # Normalize a negative axis against the rank of the first tensor.
        axis = axis % len(tensors[0].get_shape())
    return tf.concat(axis, tensors)


def LRN2D(x):
    """Local response normalization with OpenFace's alpha/beta constants."""
    return tf.nn.lrn(x, alpha=1e-4, beta=0.75)


def conv2d_bn(x,
              layer=None,
              cv1_out=None,
              cv1_filter=(1, 1),
              cv1_strides=(1, 1),
              cv2_out=None,
              cv2_filter=(3, 3),
              cv2_strides=(1, 1),
              padding=None):
    """Conv -> BatchNorm -> ReLU block, optionally padded and followed by a
    second Conv -> BatchNorm -> ReLU.

    Arguments:
        x: input tensor (channels-first layout).
        layer: base name for the created layers (must match WEIGHTS entries).
        cv1_out, cv1_filter, cv1_strides: first convolution parameters.
        cv2_out, cv2_filter, cv2_strides: optional second convolution
            parameters; the second conv is only added when `cv2_out` is set.
        padding: optional ZeroPadding2D padding applied between the two convs.

    Returns:
        The output tensor of the last activation.
    """
    # Layer names get a '1'/'2' suffix when there are two convs, '' otherwise,
    # so they line up with the names in WEIGHTS / the exported CSV files.
    num = '' if cv2_out is None else '1'
    tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides,
                    data_format='channels_first', name=layer + '_conv' + num)(x)
    tensor = BatchNormalization(axis=1, epsilon=0.00001,
                                name=layer + '_bn' + num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding, data_format='channels_first')(tensor)
    if cv2_out is None:
        return tensor
    tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides,
                    data_format='channels_first', name=layer + '_conv' + '2')(tensor)
    tensor = BatchNormalization(axis=1, epsilon=0.00001,
                                name=layer + '_bn' + '2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor


# Names of every weighted layer in the OpenFace Inception model; these must
# match both the Keras layer names and the CSV file prefixes in ./weights.
WEIGHTS = [
    'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
    'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
    'inception_3a_pool_conv', 'inception_3a_pool_bn',
    'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
    'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
    'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
    'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
    'inception_3b_pool_conv', 'inception_3b_pool_bn',
    'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
    'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
    'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
    'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
    'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
    'inception_4a_pool_conv', 'inception_4a_pool_bn',
    'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
    'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
    'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
    'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
    'inception_5a_pool_conv', 'inception_5a_pool_bn',
    'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
    'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
    'inception_5b_pool_conv', 'inception_5b_pool_bn',
    'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
    'dense_layer'
]

# Shape of each conv kernel as stored in the CSV export:
# [out_channels, in_channels, kernel_h, kernel_w] (torch layout; transposed
# to Keras layout in load_weights()).
conv_shape = {
    'conv1': [64, 3, 7, 7],
    'conv2': [64, 64, 1, 1],
    'conv3': [192, 64, 3, 3],
    'inception_3a_1x1_conv': [64, 192, 1, 1],
    'inception_3a_pool_conv': [32, 192, 1, 1],
    'inception_3a_5x5_conv1': [16, 192, 1, 1],
    'inception_3a_5x5_conv2': [32, 16, 5, 5],
    'inception_3a_3x3_conv1': [96, 192, 1, 1],
    'inception_3a_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_3x3_conv1': [96, 256, 1, 1],
    'inception_3b_3x3_conv2': [128, 96, 3, 3],
    'inception_3b_5x5_conv1': [32, 256, 1, 1],
    'inception_3b_5x5_conv2': [64, 32, 5, 5],
    'inception_3b_pool_conv': [64, 256, 1, 1],
    'inception_3b_1x1_conv': [64, 256, 1, 1],
    'inception_3c_3x3_conv1': [128, 320, 1, 1],
    'inception_3c_3x3_conv2': [256, 128, 3, 3],
    'inception_3c_5x5_conv1': [32, 320, 1, 1],
    'inception_3c_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_3x3_conv1': [96, 640, 1, 1],
    'inception_4a_3x3_conv2': [192, 96, 3, 3],
    'inception_4a_5x5_conv1': [32, 640, 1, 1],
    'inception_4a_5x5_conv2': [64, 32, 5, 5],
    'inception_4a_pool_conv': [128, 640, 1, 1],
    'inception_4a_1x1_conv': [256, 640, 1, 1],
    'inception_4e_3x3_conv1': [160, 640, 1, 1],
    'inception_4e_3x3_conv2': [256, 160, 3, 3],
    'inception_4e_5x5_conv1': [64, 640, 1, 1],
    'inception_4e_5x5_conv2': [128, 64, 5, 5],
    'inception_5a_3x3_conv1': [96, 1024, 1, 1],
    'inception_5a_3x3_conv2': [384, 96, 3, 3],
    'inception_5a_pool_conv': [96, 1024, 1, 1],
    'inception_5a_1x1_conv': [256, 1024, 1, 1],
    'inception_5b_3x3_conv1': [96, 736, 1, 1],
    'inception_5b_3x3_conv2': [384, 96, 3, 3],
    'inception_5b_pool_conv': [96, 736, 1, 1],
    'inception_5b_1x1_conv': [256, 736, 1, 1],
}


def load_weights_from_FaceNet(FRmodel):
    """Load the CSV-exported OpenFace weights into `FRmodel` layer by layer.

    Arguments:
        FRmodel: a compiled Keras model whose layer names match WEIGHTS.
    """
    # Load weights from csv files (which were exported from the OpenFace
    # torch model).
    weights = WEIGHTS
    weights_dict = load_weights()

    # Set layer weights of the model.
    # FIX: the original code had an `elif model.get_layer(name) ...` fallback
    # referencing an undefined global `model`, which raised NameError
    # whenever the first branch failed; the dead fallback has been removed.
    for name in weights:
        if FRmodel.get_layer(name) is not None:
            FRmodel.get_layer(name).set_weights(weights_dict[name])


def load_weights():
    """Read all layer weights from ./weights/*.csv into a dict.

    Returns:
        dict mapping each WEIGHTS name to the list of arrays expected by
        Keras `set_weights`: [kernel, bias] for convs, [gamma, beta, mean,
        variance] for batch-norm layers, [kernel, bias] for the dense layer.
    """
    # Set weights path; skip hidden files such as .DS_Store.
    dirPath = './weights'
    fileNames = filter(lambda f: not f.startswith('.'), os.listdir(dirPath))
    paths = {}
    weights_dict = {}

    # Map 'layername_suffix' -> CSV file path.
    for n in fileNames:
        paths[n.replace('.csv', '')] = dirPath + '/' + n

    for name in WEIGHTS:
        if 'conv' in name:
            conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            # Reshape flat CSV data to torch layout, then transpose to the
            # Keras kernel layout (h, w, in_channels, out_channels).
            conv_w = np.reshape(conv_w, conv_shape[name])
            conv_w = np.transpose(conv_w, (2, 3, 1, 0))
            conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            weights_dict[name] = [conv_w, conv_b]
        elif 'bn' in name:
            bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
            bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
            bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)
            bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)
            weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]
        elif 'dense' in name:
            dense_w = genfromtxt(dirPath + '/dense_w.csv', delimiter=',', dtype=None)
            # Torch stores the dense kernel as (out, in); transpose for Keras.
            dense_w = np.reshape(dense_w, (128, 736))
            dense_w = np.transpose(dense_w, (1, 0))
            dense_b = genfromtxt(dirPath + '/dense_b.csv', delimiter=',', dtype=None)
            weights_dict[name] = [dense_w, dense_b]

    return weights_dict


def load_dataset():
    """Load the happy-face train/test sets from the datasets/ HDF5 files.

    Returns:
        (train_x, train_y, test_x, test_y, classes) where the label arrays
        are reshaped to (1, m).
    """
    train_dataset = h5py.File('datasets/train_happy.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # train set labels

    test_dataset = h5py.File('datasets/test_happy.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


def img_to_encoding(image_path, model):
    """Compute the face embedding for the image at `image_path`.

    Arguments:
        image_path: path to an image file readable by cv2.imread.
        model: Keras model producing the embedding (expects channels-first,
            values scaled to [0, 1]).

    Returns:
        The model's embedding for the single image (batch of size 1).
    """
    img1 = cv2.imread(image_path, 1)
    # cv2 loads BGR; reverse the channel axis to get RGB.
    img = img1[..., ::-1]
    # HWC -> CHW, scale to [0, 1]; rounding mirrors the original export.
    img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12)
    x_train = np.array([img])
    embedding = model.predict_on_batch(x_train)
    return embedding