Path: blob/master/Convolutional Neural Networks/week3/Car detection for Autonomous Driving/yad2k/models/keras_yolo.py
"""YOLO_v2 Model Defined in Keras."""1import sys23import numpy as np4import tensorflow as tf5from keras import backend as K6from keras.layers import Lambda7from keras.layers.merge import concatenate8from keras.models import Model910from ..utils import compose11from .keras_darknet19 import (DarknetConv2D, DarknetConv2D_BN_Leaky, darknet_body)1213sys.path.append('..')1415voc_anchors = np.array(16[[1.08, 1.19], [3.42, 4.41], [6.63, 11.38], [9.42, 5.11], [16.62, 10.52]])1718voc_classes = [19"aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",20"chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",21"pottedplant", "sheep", "sofa", "train", "tvmonitor"22]232425def space_to_depth_x2(x):26"""Thin wrapper for Tensorflow space_to_depth with block_size=2."""27# Import currently required to make Lambda work.28# See: https://github.com/fchollet/keras/issues/5088#issuecomment-27385127329import tensorflow as tf30return tf.space_to_depth(x, block_size=2)313233def space_to_depth_x2_output_shape(input_shape):34"""Determine space_to_depth output shape for block_size=2.3536Note: For Lambda with TensorFlow backend, output shape may not be needed.37"""38return (input_shape[0], input_shape[1] // 2, input_shape[2] // 2, 4 *39input_shape[3]) if input_shape[1] else (input_shape[0], None, None,404 * input_shape[3])414243def yolo_body(inputs, num_anchors, num_classes):44"""Create YOLO_V2 model CNN body in Keras."""45darknet = Model(inputs, darknet_body()(inputs))46conv20 = compose(47DarknetConv2D_BN_Leaky(1024, (3, 3)),48DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)4950conv13 = darknet.layers[43].output51conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)52# TODO: Allow Keras Lambda to use func arguments for output_shape?53conv21_reshaped = Lambda(54space_to_depth_x2,55output_shape=space_to_depth_x2_output_shape,56name='space_to_depth')(conv21)5758x = concatenate([conv21_reshaped, conv20])59x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)60x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)61return Model(inputs, x)626364def yolo_head(feats, anchors, num_classes):65"""Convert final layer features to bounding box parameters.6667Parameters68----------69feats : tensor70Final convolutional layer features.71anchors : array-like72Anchor box widths and heights.73num_classes : int74Number of target classes.7576Returns77-------78box_xy : tensor79x, y box predictions adjusted by spatial location in conv layer.80box_wh : tensor81w, h box predictions adjusted by anchors and conv spatial resolution.82box_conf : tensor83Probability estimate for whether each box contains any object.84box_class_pred : tensor85Probability distribution estimate for each box over class labels.86"""87num_anchors = len(anchors)88# Reshape to batch, height, width, num_anchors, box_params.89anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])90# Static implementation for fixed models.91# TODO: Remove or add option for static implementation.92# _, conv_height, conv_width, _ = K.int_shape(feats)93# conv_dims = K.variable([conv_width, conv_height])9495# Dynamic implementation of conv dims for fully convolutional model.96conv_dims = K.shape(feats)[1:3] # assuming channels last97# In YOLO the height index is the inner most iteration.98conv_height_index = K.arange(0, stop=conv_dims[0])99conv_width_index = K.arange(0, stop=conv_dims[1])100conv_height_index = K.tile(conv_height_index, [conv_dims[1]])101102# TODO: Repeat_elements and tf.split doesn't support dynamic splits.103# conv_width_index = 


def yolo_head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.

    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.

    Returns
    -------
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])
    # Static implementation for fixed models.
    # TODO: Remove or add option for static implementation.
    # _, conv_height, conv_width, _ = K.int_shape(feats)
    # conv_dims = K.variable([conv_width, conv_height])

    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the inner most iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    # TODO: Repeat_elements and tf.split doesn't support dynamic splits.
    # conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
    conv_width_index = K.tile(
        K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    # Static generation of conv_index:
    # conv_index = np.array([_ for _ in np.ndindex(conv_width, conv_height)])
    # conv_index = conv_index[:, [1, 0]]  # swap columns for YOLO ordering.
    # conv_index = K.variable(
    #     conv_index.reshape(1, conv_height, conv_width, 1, 2))
    # feats = Reshape(
    #     (conv_dims[0], conv_dims[1], num_anchors, num_classes + 5))(feats)

    box_confidence = K.sigmoid(feats[..., 4:5])
    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_class_probs = K.softmax(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims

    return box_confidence, box_xy, box_wh, box_class_probs


def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ])
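

# Hypothetical sketch (not part of the original yad2k module): the per-cell
# decoding performed by yolo_head(), restated with plain NumPy for a single
# cell and anchor, assuming a square 13x13 grid so that width/height ordering
# does not matter.  sigmoid(t_xy) is offset by the cell index and normalized
# by the grid size, while w, h come from the anchor scaled by exp(t_wh).
def _sketch_decode_one_cell(raw_xy, raw_wh, cell_index, anchor,
                            conv_dims=(13., 13.)):
    """Illustrative only: NumPy mirror of the box decoding in yolo_head."""
    conv_dims = np.asarray(conv_dims, dtype=np.float32)
    sigmoid_xy = 1. / (1. + np.exp(-np.asarray(raw_xy, dtype=np.float32)))
    box_xy = (sigmoid_xy + np.asarray(cell_index, dtype=np.float32)) / conv_dims
    box_wh = np.asarray(anchor, dtype=np.float32) * np.exp(
        np.asarray(raw_wh, dtype=np.float32)) / conv_dims
    # e.g. raw_xy=(0, 0), cell_index=(6, 6), anchor=(3.42, 4.41) gives
    # box_xy = (0.5, 0.5) and box_wh ~ (0.263, 0.339), relative to the image.
    return box_xy, box_wh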


def yolo_loss(args,
              anchors,
              num_classes,
              rescore_confidence=False,
              print_loss=False):
    """YOLO localization loss function.

    Parameters
    ----------
    yolo_output : tensor
        Final convolutional layer features.

    true_boxes : tensor
        Ground truth boxes tensor with shape [batch, num_true_boxes, 5]
        containing box x_center, y_center, width, height, and class.

    detectors_mask : array
        0/1 mask for detector positions where there is a matching ground truth.

    matching_true_boxes : array
        Corresponding ground truth boxes for positive detector positions.
        Already adjusted for conv height and width.

    anchors : tensor
        Anchor boxes for model.

    num_classes : int
        Number of object classes.

    rescore_confidence : bool, default=False
        If true then set confidence target to IOU of best predicted box with
        the closest matching ground truth box.

    print_loss : bool, default=False
        If True then use a tf.Print() to print the loss components.

    Returns
    -------
    mean_loss : float
        mean localization loss across minibatch
    """
    (yolo_output, true_boxes, detectors_mask, matching_true_boxes) = args
    num_anchors = len(anchors)
    object_scale = 5
    no_object_scale = 1
    class_scale = 1
    coordinates_scale = 1
    # Note: yolo_head returns (confidence, xy, wh, class_probs) in this file.
    pred_confidence, pred_xy, pred_wh, pred_class_prob = yolo_head(
        yolo_output, anchors, num_classes)

    # Unadjusted box predictions for loss.
    # TODO: Remove extra computation shared with yolo_head.
    yolo_output_shape = K.shape(yolo_output)
    feats = K.reshape(yolo_output, [
        -1, yolo_output_shape[1], yolo_output_shape[2], num_anchors,
        num_classes + 5
    ])
    pred_boxes = K.concatenate(
        (K.sigmoid(feats[..., 0:2]), feats[..., 2:4]), axis=-1)

    # TODO: Adjust predictions by image width/height for non-square images?
    # IOUs may be off due to different aspect ratio.

    # Expand pred x,y,w,h to allow comparison with ground truth.
    # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
    pred_xy = K.expand_dims(pred_xy, 4)
    pred_wh = K.expand_dims(pred_wh, 4)

    pred_wh_half = pred_wh / 2.
    pred_mins = pred_xy - pred_wh_half
    pred_maxes = pred_xy + pred_wh_half

    true_boxes_shape = K.shape(true_boxes)

    # batch, conv_height, conv_width, num_anchors, num_true_boxes, box_params
    true_boxes = K.reshape(true_boxes, [
        true_boxes_shape[0], 1, 1, 1, true_boxes_shape[1], true_boxes_shape[2]
    ])
    true_xy = true_boxes[..., 0:2]
    true_wh = true_boxes[..., 2:4]

    # Find IOU of each predicted box with each ground truth box.
    true_wh_half = true_wh / 2.
    true_mins = true_xy - true_wh_half
    true_maxes = true_xy + true_wh_half

    intersect_mins = K.maximum(pred_mins, true_mins)
    intersect_maxes = K.minimum(pred_maxes, true_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]

    pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
    true_areas = true_wh[..., 0] * true_wh[..., 1]

    union_areas = pred_areas + true_areas - intersect_areas
    iou_scores = intersect_areas / union_areas

    # Best IOUs for each location.
    best_ious = K.max(iou_scores, axis=4)  # Best IOU scores.
    best_ious = K.expand_dims(best_ious)

    # A detector has found an object if IOU > thresh for some true box.
    object_detections = K.cast(best_ious > 0.6, K.dtype(best_ious))

    # TODO: Darknet region training includes extra coordinate loss for early
    # training steps to encourage predictions to match anchor priors.

    # Determine confidence weights from object and no_object weights.
    # NOTE: YOLO does not use binary cross-entropy here.
    no_object_weights = (no_object_scale * (1 - object_detections) *
                         (1 - detectors_mask))
    no_objects_loss = no_object_weights * K.square(-pred_confidence)

    if rescore_confidence:
        objects_loss = (object_scale * detectors_mask *
                        K.square(best_ious - pred_confidence))
    else:
        objects_loss = (object_scale * detectors_mask *
                        K.square(1 - pred_confidence))
    confidence_loss = objects_loss + no_objects_loss

    # Classification loss for matching detections.
    # NOTE: YOLO does not use categorical cross-entropy loss here.
    matching_classes = K.cast(matching_true_boxes[..., 4], 'int32')
    matching_classes = K.one_hot(matching_classes, num_classes)
    classification_loss = (class_scale * detectors_mask *
                           K.square(matching_classes - pred_class_prob))

    # Coordinate loss for matching detection boxes.
    matching_boxes = matching_true_boxes[..., 0:4]
    coordinates_loss = (coordinates_scale * detectors_mask *
                        K.square(matching_boxes - pred_boxes))

    confidence_loss_sum = K.sum(confidence_loss)
    classification_loss_sum = K.sum(classification_loss)
    coordinates_loss_sum = K.sum(coordinates_loss)
    total_loss = 0.5 * (
        confidence_loss_sum + classification_loss_sum + coordinates_loss_sum)
    if print_loss:
        total_loss = tf.Print(
            total_loss, [
                total_loss, confidence_loss_sum, classification_loss_sum,
                coordinates_loss_sum
            ],
            message='yolo_loss, conf_loss, class_loss, box_coord_loss:')

    return total_loss


def yolo(inputs, anchors, num_classes):
    """Generate a complete YOLO_v2 localization model."""
    num_anchors = len(anchors)
    body = yolo_body(inputs, num_anchors, num_classes)
    outputs = yolo_head(body.output, anchors, num_classes)
    return outputs
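

# Hypothetical training sketch (not part of the original yad2k module): since
# yolo_loss() receives its four tensors as a single `args` list, one way to
# train with it in Keras is to wrap it in a Lambda layer whose output is the
# loss itself and compile with an identity loss.  The input names, shapes and
# the 13x13 detector grid below are illustrative assumptions.
def _sketch_training_graph(anchors, num_classes, detector_shape=(13, 13)):
    """Illustrative only: attach yolo_loss to a detector body via Lambda."""
    from keras.layers import Input

    num_anchors = len(anchors)
    image_input = Input(shape=(416, 416, 3))
    boxes_input = Input(shape=(None, 5))  # x, y, w, h, class per true box
    detectors_mask_input = Input(shape=detector_shape + (num_anchors, 1))
    matching_boxes_input = Input(shape=detector_shape + (num_anchors, 5))

    body = yolo_body(image_input, num_anchors, num_classes)
    loss_tensor = Lambda(
        yolo_loss,
        output_shape=(1, ),
        name='yolo_loss',
        arguments={'anchors': anchors,
                   'num_classes': num_classes})([
                       body.output, boxes_input, detectors_mask_input,
                       matching_boxes_input
                   ])

    model = Model(
        [image_input, boxes_input, detectors_mask_input,
         matching_boxes_input], loss_tensor)
    # The Lambda layer already computes the loss, so the compiled loss simply
    # passes the prediction through.
    model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)
    return model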


def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    """Filter YOLO boxes based on object and class confidence."""

    box_scores = box_confidence * box_class_probs
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1)
    prediction_mask = box_class_scores >= threshold

    # TODO: Expose tf.boolean_mask to Keras backend?
    boxes = tf.boolean_mask(boxes, prediction_mask)
    scores = tf.boolean_mask(box_class_scores, prediction_mask)
    classes = tf.boolean_mask(box_classes, prediction_mask)

    return boxes, scores, classes


def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        box_confidence, boxes, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)

    return boxes, scores, classes
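

# Hypothetical inference sketch (not part of the original yad2k module): it
# assumes a trained yolo_body model with a 416x416 input, the TensorFlow 1.x
# session workflow this file already relies on, and that `image_data` has been
# resized to the network input size and scaled to [0, 1] elsewhere.
# `image_shape` is the original (height, width) of the image in pixels.
def _sketch_inference(model, anchors, num_classes, image_data, image_shape):
    """Illustrative only: decode, filter and NMS boxes for one image."""
    yolo_outputs = yolo_head(model.output, anchors, num_classes)
    boxes, scores, classes = yolo_eval(
        yolo_outputs, K.constant(image_shape, dtype='float32'))

    sess = K.get_session()
    return sess.run(
        [boxes, scores, classes],
        feed_dict={model.input: image_data,
                   K.learning_phase(): 0})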


def preprocess_true_boxes(true_boxes, anchors, image_size):
    """Find detector in YOLO where ground truth box should appear.

    Parameters
    ----------
    true_boxes : array
        List of ground truth boxes in form of relative x, y, w, h, class.
        Relative coordinates are in the range [0, 1] indicating a percentage
        of the original image dimensions.
    anchors : array
        List of anchors in form of w, h.
        Anchors are assumed to be in the range [0, conv_size] where conv_size
        is the spatial dimension of the final convolutional features.
    image_size : array-like
        List of image dimensions in form of h, w in pixels.

    Returns
    -------
    detectors_mask : array
        0/1 mask for detectors in [conv_height, conv_width, num_anchors, 1]
        that should be compared with a matching ground truth box.
    matching_true_boxes: array
        Same shape as detectors_mask with the corresponding ground truth box
        adjusted for comparison with predicted parameters at training time.
    """
    height, width = image_size
    num_anchors = len(anchors)
    # Downsampling factor of 5x 2-stride max_pools == 32.
    # TODO: Remove hardcoding of downscaling calculations.
    assert height % 32 == 0, 'Image sizes in YOLO_v2 must be multiples of 32.'
    assert width % 32 == 0, 'Image sizes in YOLO_v2 must be multiples of 32.'
    conv_height = height // 32
    conv_width = width // 32
    num_box_params = true_boxes.shape[1]
    detectors_mask = np.zeros(
        (conv_height, conv_width, num_anchors, 1), dtype=np.float32)
    matching_true_boxes = np.zeros(
        (conv_height, conv_width, num_anchors, num_box_params),
        dtype=np.float32)

    for box in true_boxes:
        # scale box to convolutional feature spatial dimensions
        box_class = box[4:5]
        box = box[0:4] * np.array(
            [conv_width, conv_height, conv_width, conv_height])
        # Row and column indices of the grid cell containing the box center.
        i = np.floor(box[1]).astype('int')
        j = np.floor(box[0]).astype('int')
        best_iou = 0
        best_anchor = 0

        for k, anchor in enumerate(anchors):
            # Find IOU between box shifted to origin and anchor box.
            box_maxes = box[2:4] / 2.
            box_mins = -box_maxes
            anchor_maxes = (anchor / 2.)
            anchor_mins = -anchor_maxes

            intersect_mins = np.maximum(box_mins, anchor_mins)
            intersect_maxes = np.minimum(box_maxes, anchor_maxes)
            intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
            intersect_area = intersect_wh[0] * intersect_wh[1]
            box_area = box[2] * box[3]
            anchor_area = anchor[0] * anchor[1]
            iou = intersect_area / (box_area + anchor_area - intersect_area)
            if iou > best_iou:
                best_iou = iou
                best_anchor = k

        if best_iou > 0:
            detectors_mask[i, j, best_anchor] = 1
            adjusted_box = np.array(
                [
                    box[0] - j, box[1] - i,
                    np.log(box[2] / anchors[best_anchor][0]),
                    np.log(box[3] / anchors[best_anchor][1]), box_class
                ],
                dtype=np.float32)
            matching_true_boxes[i, j, best_anchor] = adjusted_box
    return detectors_mask, matching_true_boxes
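

# Hypothetical usage sketch (not part of the original yad2k module): build the
# training targets for a single made-up ground-truth box on a 416x416 image
# using the VOC anchors defined at the top of this file, and report which
# 13x13 grid cell and anchor the box was assigned to.
def _sketch_preprocess_true_boxes():
    """Illustrative only: training targets for one toy ground-truth box."""
    # One box centred at (0.5, 0.5), roughly 30% x 40% of the image,
    # class 6 ("car" in voc_classes).
    true_boxes = np.array([[0.5, 0.5, 0.3, 0.4, 6.0]], dtype=np.float32)
    detectors_mask, matching_true_boxes = preprocess_true_boxes(
        true_boxes, voc_anchors, (416, 416))

    assigned = np.argwhere(detectors_mask[..., 0] == 1)
    print('detectors_mask shape:', detectors_mask.shape)  # (13, 13, 5, 1)
    print('assigned (row, col, anchor):', assigned)
    return detectors_mask, matching_true_boxes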