'Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the `generate` function.'
@abstractmethod
def num_anchors_per_location(self):
pass
'Generates a collection of bounding boxes to be used as anchors. TODO: remove **params from argument list and make stride and offsets (for multiple_grid_anchor_generator) constructor arguments. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Pairs can be provided as 1-dimensional integer tensors of length 2 or simply as tuples of integers. **params: parameters for anchor generation op Returns: boxes: a BoxList holding a collection of N anchor boxes Raises: ValueError: if the number of feature map shapes does not match the length of `num_anchors_per_location`.'
def generate(self, feature_map_shape_list, **params):
if self.check_num_anchors and (
    len(feature_map_shape_list) != len(self.num_anchors_per_location())):
  raise ValueError('Number of feature maps is expected to equal the length '
                   'of `num_anchors_per_location`.')
with tf.name_scope(self.name_scope()):
  anchors = self._generate(feature_map_shape_list, **params)
  if self.check_num_anchors:
    with tf.control_dependencies([
        self._assert_correct_number_of_anchors(
            anchors, feature_map_shape_list)]):
      anchors.set(tf.identity(anchors.get()))
  return anchors
'To be overridden by implementations. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. **params: parameters for anchor generation op Returns: boxes: a BoxList holding a collection of N anchor boxes'
@abstractmethod
def _generate(self, feature_map_shape_list, **params):
pass
'Assert that correct number of anchors was generated. Args: anchors: box_list.BoxList object holding anchors generated feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Returns: Op that raises InvalidArgumentError if the number of anchors does not match the number of expected anchors.'
def _assert_correct_number_of_anchors(self, anchors, feature_map_shape_list):
expected_num_anchors = 0
for num_anchors_per_location, feature_map_shape in zip(
    self.num_anchors_per_location(), feature_map_shape_list):
  expected_num_anchors += (num_anchors_per_location *
                           feature_map_shape[0] *
                           feature_map_shape[1])
return tf.assert_equal(expected_num_anchors, anchors.num_boxes())
'Constructs a minibatch sampler.'
def __init__(self):
pass
'Returns subsample of entries in indicator. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. batch_size: desired batch size. **params: additional keyword arguments for specific implementations of the MinibatchSampler. Returns: sample_indicator: boolean tensor of shape [N] whose True entries have been sampled. If sum(indicator) >= batch_size, then sum(sample_indicator) == batch_size.'
@abstractmethod
def subsample(self, indicator, batch_size, **params):
pass
'Subsample indicator vector. Given a boolean indicator vector with M elements set to `True`, the function assigns all but `num_samples` of these previously `True` elements to `False`. If `num_samples` is greater than M, the original indicator vector is returned. Args: indicator: a 1-dimensional boolean tensor indicating which elements are allowed to be sampled and which are not. num_samples: int32 scalar tensor Returns: a boolean tensor with the same shape as input (indicator) tensor'
@staticmethod
def subsample_indicator(indicator, num_samples):
indices = tf.where(indicator)
indices = tf.random_shuffle(indices)
indices = tf.reshape(indices, [-1])

num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                 tf.shape(indicator)[0])
return tf.equal(selected_indicator, 1)
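The following NumPy sketch (illustrative only; the values and the helper name subsample_indicator_np are made up, not part of the library) mirrors the logic above to show what the TF ops compute:

# NumPy mirror of subsample_indicator's logic (not the TF implementation).
import numpy as np

def subsample_indicator_np(indicator, num_samples, rng=np.random):
  indices = np.flatnonzero(indicator)          # positions allowed to be sampled
  rng.shuffle(indices)
  selected = indices[:min(num_samples, indices.size)]
  sampled = np.zeros_like(indicator, dtype=bool)
  sampled[selected] = True                     # dense boolean vector, like indices_to_dense_vector
  return sampled

print(subsample_indicator_np(np.array([True, False, True, True, False]), 2))
# e.g. [ True False False  True False] -- exactly two of the True entries survive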
'Input preprocessing, resizes images to 28x28. Args: inputs: a [batch, height_in, width_in, channels] float32 tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor.'
def preprocess(self, inputs):
return tf.image.resize_images(inputs, [28, 28])
'Prediction tensors from inputs tensor. Args: preprocessed_inputs: a [batch, 28, 28, channels] float32 tensor. Returns: prediction_dict: a dictionary holding prediction tensors to be passed to the Loss or Postprocess functions.'
def predict(self, preprocessed_inputs):
flattened_inputs = tf.contrib.layers.flatten(preprocessed_inputs)
class_prediction = tf.contrib.layers.fully_connected(
    flattened_inputs, self._num_classes)
box_prediction = tf.contrib.layers.fully_connected(flattened_inputs, 4)
return {
    'class_predictions_with_background': tf.reshape(
        class_prediction, [-1, 1, self._num_classes]),
    'box_encodings': tf.reshape(box_prediction, [-1, 1, 4])
}
'Convert predicted output tensors to final detections. Unused. Args: prediction_dict: a dictionary holding prediction tensors. **params: Additional keyword arguments for specific implementations of DetectionModel. Returns: detections: a dictionary with empty fields.'
def postprocess(self, prediction_dict, **params):
return {'detection_boxes': None, 'detection_scores': None, 'detection_classes': None, 'num_detections': None}
'Compute scalar loss tensors with respect to provided groundtruth. Calling this function requires that groundtruth tensors have been provided via the provide_groundtruth function. Args: prediction_dict: a dictionary holding predicted tensors Returns: a dictionary mapping strings (loss names) to scalar tensors representing loss values.'
def loss(self, prediction_dict):
batch_reg_targets = tf.stack(
    self.groundtruth_lists(fields.BoxListFields.boxes))
batch_cls_targets = tf.stack(
    self.groundtruth_lists(fields.BoxListFields.classes))
weights = tf.constant(
    1.0, dtype=tf.float32,
    shape=[len(self.groundtruth_lists(fields.BoxListFields.boxes)), 1])

location_losses = self._localization_loss(
    prediction_dict['box_encodings'], batch_reg_targets, weights=weights)
cls_losses = self._classification_loss(
    prediction_dict['class_predictions_with_background'], batch_cls_targets,
    weights=weights)

loss_dict = {
    'localization_loss': tf.reduce_sum(location_losses),
    'classification_loss': tf.reduce_sum(cls_losses),
}
return loss_dict
'Return callable for loading a checkpoint into the tensorflow graph. Args: checkpoint_path: path to checkpoint to restore. from_detection_checkpoint: whether to restore from a full detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Returns: a callable which takes a tf.Session and does nothing.'
def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
def restore(unused_sess):
  return
return restore
'Helper function to assert if a proto field equals some value. Args: proto_field: The protobuf field to compare. expectation: The expected value of the protobuf field.'
def _assertProtoEqual(self, proto_field, expectation):
proto_list = [p for p in proto_field]
self.assertListEqual(proto_list, expectation)
'Constructor sets keys_to_features and items_to_handlers.'
def __init__(self):
self.keys_to_features = {
    'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
    'image/filename': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/key/sha256': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/source_id': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/height': tf.FixedLenFeature((), tf.int64, 1),
    'image/width': tf.FixedLenFeature((), tf.int64, 1),
    'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
    'image/object/class/label': tf.VarLenFeature(tf.int64),
    'image/object/area': tf.VarLenFeature(tf.float32),
    'image/object/is_crowd': tf.VarLenFeature(tf.int64),
    'image/object/difficult': tf.VarLenFeature(tf.int64),
    'image/segmentation/object': tf.VarLenFeature(tf.int64),
    'image/segmentation/object/class': tf.VarLenFeature(tf.int64),
}
self.items_to_handlers = {
    fields.InputDataFields.image: slim_example_decoder.Image(
        image_key='image/encoded', format_key='image/format', channels=3),
    fields.InputDataFields.source_id: slim_example_decoder.Tensor(
        'image/source_id'),
    fields.InputDataFields.key: slim_example_decoder.Tensor(
        'image/key/sha256'),
    fields.InputDataFields.filename: slim_example_decoder.Tensor(
        'image/filename'),
    fields.InputDataFields.groundtruth_boxes: slim_example_decoder.BoundingBox(
        ['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
    fields.InputDataFields.groundtruth_classes: slim_example_decoder.Tensor(
        'image/object/class/label'),
    fields.InputDataFields.groundtruth_area: slim_example_decoder.Tensor(
        'image/object/area'),
    fields.InputDataFields.groundtruth_is_crowd: slim_example_decoder.Tensor(
        'image/object/is_crowd'),
    fields.InputDataFields.groundtruth_difficult: slim_example_decoder.Tensor(
        'image/object/difficult'),
    fields.InputDataFields.groundtruth_instance_masks:
        slim_example_decoder.ItemHandlerCallback(
            ['image/segmentation/object', 'image/height', 'image/width'],
            self._reshape_instance_masks),
    fields.InputDataFields.groundtruth_instance_classes:
        slim_example_decoder.Tensor('image/segmentation/object/class'),
}
'Decodes serialized tensorflow example and returns a tensor dictionary. Args: tf_example_string_tensor: a string tensor holding a serialized tensorflow example proto. Returns: A dictionary of the following tensors. fields.InputDataFields.image - 3D uint8 tensor of shape [None, None, 3] containing image. fields.InputDataFields.source_id - string tensor containing original image id. fields.InputDataFields.key - string tensor with unique sha256 hash key. fields.InputDataFields.filename - string tensor with original dataset filename. fields.InputDataFields.groundtruth_boxes - 2D float32 tensor of shape [None, 4] containing box corners. fields.InputDataFields.groundtruth_classes - 1D int64 tensor of shape [None] containing classes for the boxes. fields.InputDataFields.groundtruth_area - 1D float32 tensor of shape [None] containing object mask area in pixels squared. fields.InputDataFields.groundtruth_is_crowd - 1D bool tensor of shape [None] indicating if the boxes enclose a crowd. fields.InputDataFields.groundtruth_difficult - 1D bool tensor of shape [None] indicating if the boxes represent `difficult` instances. fields.InputDataFields.groundtruth_instance_masks - 3D int64 tensor of shape [None, None, None] containing instance masks. fields.InputDataFields.groundtruth_instance_classes - 1D int64 tensor of shape [None] containing classes for the instance masks.'
def Decode(self, tf_example_string_tensor):
serialized_example = tf.reshape(tf_example_string_tensor, shape=[])
decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features,
                                                self.items_to_handlers)
keys = decoder.list_items()
tensors = decoder.decode(serialized_example, items=keys)
tensor_dict = dict(zip(keys, tensors))
is_crowd = fields.InputDataFields.groundtruth_is_crowd
tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool)
tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3])
return tensor_dict
'Reshape instance segmentation masks. The instance segmentation masks are reshaped to [num_instances, height, width] and cast to boolean type to save memory. Args: keys_to_tensors: a dictionary from keys to tensors. Returns: A 3-D boolean tensor of shape [num_instances, height, width].'
def _reshape_instance_masks(self, keys_to_tensors):
masks = keys_to_tensors['image/segmentation/object']
if isinstance(masks, tf.SparseTensor):
  masks = tf.sparse_tensor_to_dense(masks)
height = keys_to_tensors['image/height']
width = keys_to_tensors['image/width']
to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
return tf.cast(tf.reshape(masks, to_shape), tf.bool)
'Constructor for FasterRcnnBoxCoder. Args: scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If set to None, does not perform scaling. For Faster RCNN, the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].'
def __init__(self, scale_factors=None):
if scale_factors:
  assert len(scale_factors) == 4
  for scalar in scale_factors:
    assert scalar > 0
self._scale_factors = scale_factors
'Encode a box collection with respect to anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw].'
def _encode(self, boxes, anchors):
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON

tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
# Optionally scale the location targets.
if self._scale_factors:
  ty *= self._scale_factors[0]
  tx *= self._scale_factors[1]
  th *= self._scale_factors[2]
  tw *= self._scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
'Decode relative codes to boxes. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes.'
def _decode(self, rel_codes, anchors):
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
  ty /= self._scale_factors[0]
  tx /= self._scale_factors[1]
  th /= self._scale_factors[2]
  tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
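A hedged worked example of the encode/decode arithmetic above (plain NumPy, made-up anchor and box values, EPSILON ignored), using the [10.0, 10.0, 5.0, 5.0] scale factors mentioned in the constructor docstring:

# Worked example of the [ty, tx, th, tw] encoding (NumPy mirror, values illustrative).
import numpy as np

# Anchor: center (y=10, x=10), height 4, width 8. Box: center (12, 14), height 8, width 16.
ycenter_a, xcenter_a, ha, wa = 10.0, 10.0, 4.0, 8.0
ycenter, xcenter, h, w = 12.0, 14.0, 8.0, 16.0
scale_factors = [10.0, 10.0, 5.0, 5.0]

ty = (ycenter - ycenter_a) / ha * scale_factors[0]   # 0.5 * 10 = 5.0
tx = (xcenter - xcenter_a) / wa * scale_factors[1]   # 0.5 * 10 = 5.0
th = np.log(h / ha) * scale_factors[2]               # log(2) * 5 ~= 3.466
tw = np.log(w / wa) * scale_factors[3]               # log(2) * 5 ~= 3.466

# Decoding inverts the transform and recovers the original box center and size.
h_dec = np.exp(th / scale_factors[2]) * ha              # 8.0
ycenter_dec = (ty / scale_factors[0]) * ha + ycenter_a  # 12.0
print(ty, tx, th, tw, h_dec, ycenter_dec)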
'Encode a box collection with respect to anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of N anchors. We assume that anchors has an associated stddev field. Returns: a tensor representing N anchor-encoded boxes Raises: ValueError: if the anchors BoxList does not have a stddev field'
def _encode(self, boxes, anchors):
if not anchors.has_field('stddev'):
  raise ValueError('anchors must have a stddev field')
box_corners = boxes.get()
means = anchors.get()
stddev = anchors.get_field('stddev')
return (box_corners - means) / stddev
'Decode. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. We assume that anchors has an associated stddev field. Returns: boxes: BoxList holding N bounding boxes Raises: ValueError: if the anchors BoxList does not have a stddev field'
def _decode(self, rel_codes, anchors):
if not anchors.has_field('stddev'):
  raise ValueError('anchors must have a stddev field')
means = anchors.get()
stddevs = anchors.get_field('stddev')
box_corners = rel_codes * stddevs + means
return box_list.BoxList(box_corners)
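A quick arithmetic sketch of this mean/stddev coding (NumPy, illustrative values only; the anchor corners play the role of the means):

import numpy as np

anchor = np.array([0.0, 0.0, 1.0, 1.0])   # anchor corners act as the "means"
stddev = np.array([0.1, 0.1, 0.2, 0.2])   # the 'stddev' field attached to the anchor
box = np.array([0.1, 0.0, 0.9, 1.0])

rel_code = (box - anchor) / stddev        # encode: [ 1.   0.  -0.5  0. ]
decoded = rel_code * stddev + anchor      # decode recovers the original corners
print(rel_code, decoded)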
'Constructor for SquareBoxCoder. Args: scale_factors: List of 3 positive scalars to scale ty, tx, and tl. If set to None, does not perform scaling. For faster RCNN, the open-source implementation recommends using [10.0, 10.0, 5.0]. Raises: ValueError: If scale_factors is not length 3 or contains values less than or equal to 0.'
def __init__(self, scale_factors=None):
if scale_factors:
  if len(scale_factors) != 3:
    raise ValueError('The argument scale_factors must be a list of length 3.')
  if any(scalar <= 0 for scalar in scale_factors):
    raise ValueError('The values in scale_factors must all be greater than 0.')
self._scale_factors = scale_factors
'Encodes a box collection with respect to an anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, tl].'
def _encode(self, boxes, anchors):
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
l = tf.sqrt(h * w)
la += EPSILON
l += EPSILON

tx = (xcenter - xcenter_a) / la
ty = (ycenter - ycenter_a) / la
tl = tf.log(l / la)
if self._scale_factors:
  ty *= self._scale_factors[0]
  tx *= self._scale_factors[1]
  tl *= self._scale_factors[2]
return tf.transpose(tf.stack([ty, tx, tl]))
'Decodes relative codes to boxes. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes.'
def _decode(self, rel_codes, anchors):
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)

ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
  ty /= self._scale_factors[0]
  tx /= self._scale_factors[1]
  tl /= self._scale_factors[2]
l = tf.exp(tl) * la
ycenter = ty * la + ycenter_a
xcenter = tx * la + xcenter_a
ymin = ycenter - l / 2.
xmin = xcenter - l / 2.
ymax = ycenter + l / 2.
xmax = xcenter + l / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
'Constructor for KeypointBoxCoder. Args: num_keypoints: Number of keypoints to encode/decode. scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. In addition to scaling ty and tx, the first 2 scalars are used to scale the y and x coordinates of the keypoints as well. If set to None, does not perform scaling.'
def __init__(self, num_keypoints, scale_factors=None):
self._num_keypoints = num_keypoints

if scale_factors:
  assert len(scale_factors) == 4
  for scalar in scale_factors:
    assert scalar > 0
self._scale_factors = scale_factors
self._keypoint_scale_factors = None
if scale_factors is not None:
  self._keypoint_scale_factors = tf.expand_dims(
      tf.tile([tf.to_float(scale_factors[0]),
               tf.to_float(scale_factors[1])],
              [num_keypoints]), 1)
'Encode a box and keypoint collection with respect to anchor collection. Args: boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are tensors with the shape [N, 4], and keypoints are tensors with the shape [N, num_keypoints, 2]. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] where tky0 and tkx0 represent the y and x coordinates of the first keypoint, tky1 and tkx1 represent the y and x coordinates of the second keypoint, and so on.'
def _encode(self, boxes, anchors):
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
keypoints = boxes.get_field(fields.BoxListFields.keypoints)
keypoints = tf.transpose(tf.reshape(keypoints,
                                    [-1, self._num_keypoints * 2]))
num_boxes = boxes.num_boxes()

ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON

tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)

tiled_anchor_centers = tf.tile(tf.stack([ycenter_a, xcenter_a]),
                               [self._num_keypoints, 1])
tiled_anchor_sizes = tf.tile(tf.stack([ha, wa]),
                             [self._num_keypoints, 1])
tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes

if self._scale_factors:
  ty *= self._scale_factors[0]
  tx *= self._scale_factors[1]
  th *= self._scale_factors[2]
  tw *= self._scale_factors[3]
  tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes])

tboxes = tf.stack([ty, tx, th, tw])
return tf.transpose(tf.concat([tboxes, tkeypoints], 0))
'Decode relative codes to boxes and keypoints. Args: rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N anchor-encoded boxes and keypoints anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes and keypoints.'
def _decode(self, rel_codes, anchors):
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()

num_codes = tf.shape(rel_codes)[0]
result = tf.unstack(tf.transpose(rel_codes))
ty, tx, th, tw = result[:4]
tkeypoints = result[4:]
if self._scale_factors:
  ty /= self._scale_factors[0]
  tx /= self._scale_factors[1]
  th /= self._scale_factors[2]
  tw /= self._scale_factors[3]
  tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes])

w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
decoded_boxes_keypoints = box_list.BoxList(
    tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))

tiled_anchor_centers = tf.tile(tf.stack([ycenter_a, xcenter_a]),
                               [self._num_keypoints, 1])
tiled_anchor_sizes = tf.tile(tf.stack([ha, wa]),
                             [self._num_keypoints, 1])
keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers
keypoints = tf.reshape(tf.transpose(keypoints),
                       [-1, self._num_keypoints, 2])
decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints)
return decoded_boxes_keypoints
'Builds a 1x1 anchor grid to test the size of the output boxes.'
def test_construct_single_anchor(self):
scales = [0.5, 1.0, 2.0]
aspect_ratios = [0.25, 1.0, 4.0]
anchor_offset = [7, -3]
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
                      [-505, -131, 519, 125], [-57, -67, 71, 61],
                      [-121, -131, 135, 125], [-249, -259, 263, 253],
                      [-25, -131, 39, 125], [-57, -259, 71, 253],
                      [-121, -515, 135, 509]]

anchor_generator = grid_anchor_generator.GridAnchorGenerator(
    scales, aspect_ratios, anchor_offset=anchor_offset)
anchors = anchor_generator.generate(feature_map_shape_list=[(1, 1)])
anchor_corners = anchors.get()

with self.test_session():
  anchor_corners_out = anchor_corners.eval()
  self.assertAllClose(anchor_corners_out, exp_anchor_corners)
'Constructs a GridAnchorGenerator. Args: scales: a list of (float) scales, default=(0.5, 1.0, 2.0) aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0) base_anchor_size: base anchor size as [height, width] (length-2 float32 list, default=[256, 256]) anchor_stride: difference in centers between base anchors for adjacent grid positions (length-2 float32 list, default=[16, 16]) anchor_offset: center of the anchor with scale and aspect ratio 1 for the upper left element of the grid, this should be zero for feature networks with only VALID padding and even receptive field size, but may need additional calculation if other padding is used (length-2 float32 tensor, default=[0, 0])'
def __init__(self, scales=(0.5, 1.0, 2.0), aspect_ratios=(0.5, 1.0, 2.0), base_anchor_size=None, anchor_stride=None, anchor_offset=None):
if base_anchor_size is None:
  base_anchor_size = [256, 256]
base_anchor_size = tf.constant(base_anchor_size, tf.float32)
if anchor_stride is None:
  anchor_stride = [16, 16]
anchor_stride = tf.constant(anchor_stride, dtype=tf.float32)
if anchor_offset is None:
  anchor_offset = [0, 0]
anchor_offset = tf.constant(anchor_offset, dtype=tf.float32)

self._scales = scales
self._aspect_ratios = aspect_ratios
self._base_anchor_size = base_anchor_size
self._anchor_stride = anchor_stride
self._anchor_offset = anchor_offset
'Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the `generate` function.'
def num_anchors_per_location(self):
return [(len(self._scales) * len(self._aspect_ratios))]
'Generates a collection of bounding boxes to be used as anchors. Args: feature_map_shape_list: list of pairs of convnet layer resolutions in the format [(height_0, width_0)]. For example, setting feature_map_shape_list=[(8, 8)] asks for anchors that correspond to an 8x8 layer. For this anchor generator, only lists of length 1 are allowed. Returns: boxes: a BoxList holding a collection of N anchor boxes Raises: ValueError: if feature_map_shape_list is not a list of length 1. ValueError: if feature_map_shape_list does not consist of pairs of integers'
def _generate(self, feature_map_shape_list):
if not (isinstance(feature_map_shape_list, list)
        and len(feature_map_shape_list) == 1):
  raise ValueError('feature_map_shape_list must be a list of length 1.')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
            for list_item in feature_map_shape_list]):
  raise ValueError('feature_map_shape_list must be a list of pairs.')
grid_height, grid_width = feature_map_shape_list[0]
scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
                                               self._aspect_ratios)
scales_grid = tf.reshape(scales_grid, [-1])
aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1])
return tile_anchors(grid_height,
                    grid_width,
                    scales_grid,
                    aspect_ratios_grid,
                    self._base_anchor_size,
                    self._anchor_stride,
                    self._anchor_offset)
'Builds a 1x1 anchor grid to test the size of the output boxes.'
def test_construct_single_anchor_grid(self):
exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],
                      [-505, -131, 519, 125], [-57, -67, 71, 61],
                      [-121, -131, 135, 125], [-249, -259, 263, 253],
                      [-25, -131, 39, 125], [-57, -259, 71, 253],
                      [-121, -515, 135, 509]]

base_anchor_size = tf.constant([256, 256], dtype=tf.float32)
box_specs_list = [[(0.5, 0.25), (1.0, 0.25), (2.0, 0.25),
                   (0.5, 1.0), (1.0, 1.0), (2.0, 1.0),
                   (0.5, 4.0), (1.0, 4.0), (2.0, 4.0)]]
anchor_generator = ag.MultipleGridAnchorGenerator(box_specs_list,
                                                  base_anchor_size)
anchors = anchor_generator.generate(feature_map_shape_list=[(1, 1)],
                                    anchor_strides=[(16, 16)],
                                    anchor_offsets=[(7, -3)])
anchor_corners = anchors.get()
with self.test_session():
  anchor_corners_out = anchor_corners.eval()
  self.assertAllClose(anchor_corners_out, exp_anchor_corners)
'Constructs a MultipleGridAnchorGenerator. To construct anchors, at multiple grid resolutions, one must provide a list of feature_map_shape_list (e.g., [(8, 8), (4, 4)]), and for each grid size, a corresponding list of (scale, aspect ratio) box specifications. For example: box_specs_list = [[(.1, 1.0), (.1, 2.0)], # for 8x8 grid [(.2, 1.0), (.3, 1.0), (.2, 2.0)]] # for 4x4 grid To support the fully convolutional setting, we pass grid sizes in at generation time, while scale and aspect ratios are fixed at construction time. Args: box_specs_list: list of list of (scale, aspect ratio) pairs with the outside list having the same number of entries as feature_map_shape_list (which is passed in at generation time). base_anchor_size: base anchor size as [height, width] (length-2 float tensor, default=[256, 256]). clip_window: a tensor of shape [4] specifying a window to which all anchors should be clipped. If clip_window is None, then no clipping is performed. Raises: ValueError: if box_specs_list is not a list of list of pairs ValueError: if clip_window is not either None or a tensor of shape [4]'
def __init__(self, box_specs_list, base_anchor_size=None, clip_window=None):
if isinstance(box_specs_list, list) and all(
    [isinstance(list_item, list) for list_item in box_specs_list]):
  self._box_specs = box_specs_list
else:
  raise ValueError('box_specs_list is expected to be a '
                   'list of lists of pairs')
if base_anchor_size is None:
  base_anchor_size = tf.constant([256, 256], dtype=tf.float32)
self._base_anchor_size = base_anchor_size
if clip_window is not None and clip_window.get_shape().as_list() != [4]:
  raise ValueError('clip_window must either be None or a shape [4] tensor')
self._clip_window = clip_window
self._scales = []
self._aspect_ratios = []
for box_spec in self._box_specs:
  if not all([isinstance(entry, tuple) and len(entry) == 2
              for entry in box_spec]):
    raise ValueError('box_specs_list is expected to be a '
                     'list of lists of pairs')
  scales, aspect_ratios = zip(*box_spec)
  self._scales.append(scales)
  self._aspect_ratios.append(aspect_ratios)
'Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the Generate function.'
def num_anchors_per_location(self):
return [len(box_specs) for box_specs in self._box_specs]
'Generates a collection of bounding boxes to be used as anchors. The number of anchors generated for a single grid with shape MxM where we place k boxes over each grid center is k*M^2 and thus the total number of anchors is the sum over all grids. In our box_specs_list example (see the constructor docstring), we would place two boxes over each grid point on an 8x8 grid and three boxes over each grid point on a 4x4 grid and thus end up with 2*8^2 + 3*4^2 = 176 anchors in total. The layout of the output anchors follows the order of how the grid sizes and box_specs are specified (with box_spec index varying the fastest, followed by width index, then height index, then grid index). Args: feature_map_shape_list: list of pairs of convnet layer resolutions in the format [(height_0, width_0), (height_1, width_1), ...]. For example, setting feature_map_shape_list=[(8, 8), (7, 7)] asks for anchors that correspond to an 8x8 layer followed by a 7x7 layer. im_height: the height of the image to generate the grid for. If both im_height and im_width are 1, the generated anchors default to normalized coordinates, otherwise absolute coordinates are used for the grid. im_width: the width of the image to generate the grid for. If both im_height and im_width are 1, the generated anchors default to normalized coordinates, otherwise absolute coordinates are used for the grid. anchor_strides: list of pairs of strides (in y and x directions respectively). For example, setting anchor_strides=[(.25, .25), (.5, .5)] means that we want the anchors corresponding to the first layer to be strided by .25 and those in the second layer to be strided by .5 in both y and x directions. By default, if anchor_strides=None, then they are set to be the reciprocal of the corresponding grid sizes. The pairs can also be specified as dynamic tf.int or tf.float numbers, e.g. for variable shape input images. anchor_offsets: list of pairs of offsets (in y and x directions respectively). The offset specifies where we want the center of the (0, 0)-th anchor to lie for each layer. For example, setting anchor_offsets=[(.125, .125), (.25, .25)]) means that we want the (0, 0)-th anchor of the first layer to lie at (.125, .125) in image space and likewise that we want the (0, 0)-th anchor of the second layer to lie at (.25, .25) in image space. By default, if anchor_offsets=None, then they are set to be half of the corresponding anchor stride. The pairs can also be specified as dynamic tf.int or tf.float numbers, e.g. for variable shape input images. Returns: boxes: a BoxList holding a collection of N anchor boxes Raises: ValueError: if feature_map_shape_list, box_specs_list do not have the same length. ValueError: if feature_map_shape_list does not consist of pairs of integers'
def _generate(self, feature_map_shape_list, im_height=1, im_width=1, anchor_strides=None, anchor_offsets=None):
if not (isinstance(feature_map_shape_list, list)
        and len(feature_map_shape_list) == len(self._box_specs)):
  raise ValueError('feature_map_shape_list must be a list with the same '
                   'length as self._box_specs')
if not all([isinstance(list_item, tuple) and len(list_item) == 2
            for list_item in feature_map_shape_list]):
  raise ValueError('feature_map_shape_list must be a list of pairs.')
if not anchor_strides:
  anchor_strides = [(tf.to_float(im_height) / tf.to_float(pair[0]),
                     tf.to_float(im_width) / tf.to_float(pair[1]))
                    for pair in feature_map_shape_list]
if not anchor_offsets:
  anchor_offsets = [(0.5 * stride[0], 0.5 * stride[1])
                    for stride in anchor_strides]
for arg, arg_name in zip([anchor_strides, anchor_offsets],
                         ['anchor_strides', 'anchor_offsets']):
  if not (isinstance(arg, list) and len(arg) == len(self._box_specs)):
    raise ValueError('%s must be a list with the same length '
                     'as self._box_specs' % arg_name)
  if not all([isinstance(list_item, tuple) and len(list_item) == 2
              for list_item in arg]):
    raise ValueError('%s must be a list of pairs.' % arg_name)

anchor_grid_list = []
min_im_shape = tf.to_float(tf.minimum(im_height, im_width))
base_anchor_size = min_im_shape * self._base_anchor_size
for grid_size, scales, aspect_ratios, stride, offset in zip(
    feature_map_shape_list, self._scales, self._aspect_ratios,
    anchor_strides, anchor_offsets):
  anchor_grid_list.append(
      grid_anchor_generator.tile_anchors(
          grid_height=grid_size[0],
          grid_width=grid_size[1],
          scales=scales,
          aspect_ratios=aspect_ratios,
          base_anchor_size=base_anchor_size,
          anchor_stride=stride,
          anchor_offset=offset))
concatenated_anchors = box_list_ops.concatenate(anchor_grid_list)
num_anchors = concatenated_anchors.num_boxes_static()
if num_anchors is None:
  num_anchors = concatenated_anchors.num_boxes()
if self._clip_window is not None:
  clip_window = tf.multiply(
      tf.to_float([im_height, im_width, im_height, im_width]),
      self._clip_window)
  concatenated_anchors = box_list_ops.clip_to_window(
      concatenated_anchors, clip_window, filter_nonoverlapping=False)
  concatenated_anchors.set(
      tf.reshape(concatenated_anchors.get(), [num_anchors, 4]))
stddevs_tensor = 0.01 * tf.ones(
    [num_anchors, 4], dtype=tf.float32, name='stddevs')
concatenated_anchors.add_field('stddev', stddevs_tensor)
return concatenated_anchors
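A small sketch (plain Python, reusing the example numbers from the constructor docstring above) verifying the anchor-count arithmetic described there:

# Quick check of the anchor count described above (no TF needed).
box_specs_list = [[(.1, 1.0), (.1, 2.0)],                 # 2 boxes per 8x8 grid cell
                  [(.2, 1.0), (.3, 1.0), (.2, 2.0)]]      # 3 boxes per 4x4 grid cell
feature_map_shape_list = [(8, 8), (4, 4)]

total = sum(len(specs) * h * w
            for specs, (h, w) in zip(box_specs_list, feature_map_shape_list))
print(total)  # 2*8*8 + 3*4*4 = 176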
'Bipartite matches a collection of rows and columns using a greedy bipartite matching. TODO: Add num_valid_columns options to match only that many columns with all the rows. Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher values mean more similar. num_valid_rows: A scalar or a 1-D tensor with one element describing the number of valid rows of similarity_matrix to consider for the bipartite matching. If set to be negative, then all rows from similarity_matrix are used. Returns: match_results: int32 tensor of shape [M] with match_results[i]=-1 meaning that column i is not matched and otherwise that it is matched to row match_results[i].'
def _match(self, similarity_matrix, num_valid_rows=-1):
# Convert similarities to distances so the bipartite matcher minimizes cost.
distance_matrix = -1 * similarity_matrix
_, match_results = image_ops.bipartite_match(distance_matrix, num_valid_rows)
match_results = tf.reshape(match_results, [-1])
match_results = tf.cast(match_results, tf.int32)
return match_results
'Construct ArgMaxMatcher. Args: matched_threshold: Threshold for positive matches. Positive if sim >= matched_threshold, where sim is the maximum value of the similarity matrix for a given column. Set to None for no threshold. unmatched_threshold: Threshold for negative matches. Negative if sim < unmatched_threshold. Defaults to matched_threshold when set to None. negatives_lower_than_unmatched: Boolean which defaults to True. If True then negative matches are the ones below the unmatched_threshold, whereas ignored matches are in between the matched and unmatched threshold. If False, then negative matches are in between the matched and unmatched threshold, and everything lower than unmatched is ignored. force_match_for_each_row: If True, ensures that each row is matched to at least one column (which is not guaranteed otherwise if the matched_threshold is high). Defaults to False. See argmax_matcher_test.testMatcherForceMatch() for an example. Raises: ValueError: if unmatched_threshold is set but matched_threshold is not set or if unmatched_threshold > matched_threshold.'
def __init__(self, matched_threshold, unmatched_threshold=None, negatives_lower_than_unmatched=True, force_match_for_each_row=False):
if (matched_threshold is None) and (unmatched_threshold is not None):
  raise ValueError('Need to also define matched_threshold when '
                   'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
  self._unmatched_threshold = matched_threshold
else:
  if unmatched_threshold > matched_threshold:
    raise ValueError('unmatched_threshold needs to be smaller or equal '
                     'to matched_threshold')
  self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
  if self._unmatched_threshold == self._matched_threshold:
    raise ValueError('When negatives are in between matched and '
                     'unmatched thresholds, these cannot be of equal '
                     'value. matched: %s, unmatched: %s' %
                     (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
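The sketch below (a NumPy mirror with made-up thresholds and similarities, assuming negatives_lower_than_unmatched=True) illustrates the three column regimes produced by these thresholds:

import numpy as np

matched_threshold, unmatched_threshold = 0.7, 0.3
similarity = np.array([[0.9, 0.4, 0.1],
                       [0.2, 0.5, 0.05]])   # 2 rows (groundtruth), 3 columns (anchors)

matches = similarity.argmax(axis=0)          # best row per column
max_sim = similarity.max(axis=0)
matches[max_sim < unmatched_threshold] = -1  # negative (below unmatched threshold)
matches[(max_sim >= unmatched_threshold) &
        (max_sim < matched_threshold)] = -2  # ignored (between thresholds)
print(matches)  # [ 0 -2 -1]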
'Tries to match each column of the similarity matrix to a row. Args: similarity_matrix: tensor of shape [N, M] representing any similarity metric. Returns: Match object with corresponding matches for each of M columns.'
def _match(self, similarity_matrix):
def _match_when_rows_are_empty():
  """Performs matching when the rows of similarity matrix are empty.

  When the rows are empty, all detections are false positives. So we return
  a tensor of -1's to indicate that the columns do not match to any rows.

  Returns:
    matches: int32 tensor indicating the row each column matches to.
  """
  return -1 * tf.ones([tf.shape(similarity_matrix)[1]], dtype=tf.int32)

def _match_when_rows_are_non_empty():
  """Performs matching when the rows of similarity matrix are non empty.

  Returns:
    matches: int32 tensor indicating the row each column matches to.
  """
  matches = tf.argmax(similarity_matrix, 0)

  if self._matched_threshold is not None:
    matched_vals = tf.reduce_max(similarity_matrix, 0)
    below_unmatched_threshold = tf.greater(self._unmatched_threshold,
                                           matched_vals)
    between_thresholds = tf.logical_and(
        tf.greater_equal(matched_vals, self._unmatched_threshold),
        tf.greater(self._matched_threshold, matched_vals))

    if self._negatives_lower_than_unmatched:
      matches = self._set_values_using_indicator(matches,
                                                 below_unmatched_threshold,
                                                 -1)
      matches = self._set_values_using_indicator(matches,
                                                 between_thresholds,
                                                 -2)
    else:
      matches = self._set_values_using_indicator(matches,
                                                 below_unmatched_threshold,
                                                 -2)
      matches = self._set_values_using_indicator(matches,
                                                 between_thresholds,
                                                 -1)

  if self._force_match_for_each_row:
    forced_matches_ids = tf.cast(tf.argmax(similarity_matrix, 1), tf.int32)
    row_range = tf.range(tf.shape(similarity_matrix)[0])
    col_range = tf.range(tf.shape(similarity_matrix)[1])
    forced_matches_values = tf.cast(row_range, matches.dtype)
    keep_matches_ids, _ = tf.setdiff1d(col_range, forced_matches_ids)
    keep_matches_values = tf.gather(matches, keep_matches_ids)
    matches = tf.dynamic_stitch(
        [forced_matches_ids, keep_matches_ids],
        [forced_matches_values, keep_matches_values])

  return tf.cast(matches, tf.int32)

return tf.cond(
    tf.greater(tf.shape(similarity_matrix)[0], 0),
    _match_when_rows_are_non_empty, _match_when_rows_are_empty)
'Set the indicated fields of x to val. Args: x: tensor. indicator: boolean with same shape as x. val: scalar with value to set. Returns: modified tensor.'
def _set_values_using_indicator(self, x, indicator, val):
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
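A tiny NumPy illustration of this indicator arithmetic (made-up values): x * (1 - indicator) + val * indicator keeps x where the indicator is 0 and writes val where it is 1.

import numpy as np

x = np.array([5, 7, 9])
indicator = np.array([0, 1, 0])
val = -1
print(x * (1 - indicator) + val * indicator)  # [ 5 -1  9]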
'Helper to check if two dicts with floats or integers are close.'
def assert_dictionary_close(self, dict1, dict2):
self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))
for key in dict1:
  value = dict1[key]
  if isinstance(value, float):
    self.assertAlmostEqual(value, dict2[key])
  else:
    self.assertEqual(value, dict2[key])
'Builds a DetectionModel based on the model config. Args: model_config: A model.proto object containing the config for the desired DetectionModel. Returns: DetectionModel based on the config.'
def create_model(self, model_config):
return model_builder.build(model_config, is_training=True)
'Initializes PerImageEvaluation with evaluation parameters. Args: num_groundtruth_classes: Number of ground truth object classes matching_iou_threshold: A ratio of area intersection to union, which is the threshold to consider whether a detection is true positive or not nms_iou_threshold: IOU threshold used in Non Maximum Suppression. nms_max_output_boxes: Number of maximum output boxes in NMS.'
def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5, nms_iou_threshold=0.3, nms_max_output_boxes=50):
self.matching_iou_threshold = matching_iou_threshold
self.nms_iou_threshold = nms_iou_threshold
self.nms_max_output_boxes = nms_max_output_boxes
self.num_groundtruth_classes = num_groundtruth_classes
'Compute Object Detection related metrics from a single image. Args: detected_boxes: A float numpy array of shape [N, 4], representing N regions of detected object regions. Each row is of the format [y_min, x_min, y_max, x_max] detected_scores: A float numpy array of shape [N, 1], representing the confidence scores of the detected N object instances. detected_class_labels: An integer numpy array of shape [N, 1], representing the class labels of the detected N object instances. groundtruth_boxes: A float numpy array of shape [M, 4], representing M regions of object instances in ground truth groundtruth_class_labels: An integer numpy array of shape [M, 1], representing M class labels of object instances in ground truth groundtruth_is_difficult_lists: A boolean numpy array of length M denoting whether a ground truth box is a difficult instance or not Returns: scores: A list of C float numpy arrays. Each numpy array is of shape [K, 1], representing K scores detected with object class label c tp_fp_labels: A list of C boolean numpy arrays. Each numpy array is of shape [K, 1], representing K True/False positive label of object instances detected with class label c is_class_correctly_detected_in_image: a numpy integer array of shape [C, 1], indicating whether the corresponding class has at least one instance being correctly detected in the image'
def compute_object_detection_metrics(self, detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_lists):
detected_boxes, detected_scores, detected_class_labels = (
    self._remove_invalid_boxes(detected_boxes, detected_scores,
                               detected_class_labels))
scores, tp_fp_labels = self._compute_tp_fp(
    detected_boxes, detected_scores, detected_class_labels,
    groundtruth_boxes, groundtruth_class_labels,
    groundtruth_is_difficult_lists)
is_class_correctly_detected_in_image = self._compute_cor_loc(
    detected_boxes, detected_scores, detected_class_labels,
    groundtruth_boxes, groundtruth_class_labels)
return scores, tp_fp_labels, is_class_correctly_detected_in_image
'Compute CorLoc score for object detection result. Args: detected_boxes: A float numpy array of shape [N, 4], representing N regions of detected object regions. Each row is of the format [y_min, x_min, y_max, x_max] detected_scores: A float numpy array of shape [N, 1], representing the confidence scores of the detected N object instances. detected_class_labels: An integer numpy array of shape [N, 1], representing the class labels of the detected N object instances. groundtruth_boxes: A float numpy array of shape [M, 4], representing M regions of object instances in ground truth groundtruth_class_labels: An integer numpy array of shape [M, 1], representing M class labels of object instances in ground truth Returns: is_class_correctly_detected_in_image: a numpy integer array of shape [C, 1], indicating whether the corresponding class has at least one instance being correctly detected in the image'
def _compute_cor_loc(self, detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels):
is_class_correctly_detected_in_image = np.zeros(
    self.num_groundtruth_classes, dtype=int)
for i in range(self.num_groundtruth_classes):
  gt_boxes_at_ith_class = groundtruth_boxes[groundtruth_class_labels == i, :]
  detected_boxes_at_ith_class = detected_boxes[detected_class_labels == i, :]
  detected_scores_at_ith_class = detected_scores[detected_class_labels == i]
  is_class_correctly_detected_in_image[i] = (
      self._compute_is_aclass_correctly_detected_in_image(
          detected_boxes_at_ith_class, detected_scores_at_ith_class,
          gt_boxes_at_ith_class))
return is_class_correctly_detected_in_image
'Compute CorLoc score for a single class. Args: detected_boxes: A numpy array of shape [N, 4] representing detected box coordinates detected_scores: A 1-d numpy array of length N representing classification score groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth box coordinates Returns: is_class_correctly_detected_in_image: An integer 1 or 0 denoting whether a class is correctly detected in the image or not'
def _compute_is_aclass_correctly_detected_in_image(self, detected_boxes, detected_scores, groundtruth_boxes):
if detected_boxes.size > 0:
  if groundtruth_boxes.size > 0:
    max_score_id = np.argmax(detected_scores)
    detected_boxlist = np_box_list.BoxList(
        np.expand_dims(detected_boxes[max_score_id, :], axis=0))
    gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
    iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)
    if np.max(iou) >= self.matching_iou_threshold:
      return 1
return 0
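An illustrative NumPy sketch of the same CorLoc check: take the highest-scoring detection and test whether its IoU with any groundtruth box reaches the matching threshold. The helper iou_single is hypothetical, standing in for np_box_list_ops.iou, and the values are made up.

import numpy as np

def iou_single(box, gt):  # boxes as [y_min, x_min, y_max, x_max]
  iy = max(0.0, min(box[2], gt[2]) - max(box[0], gt[0]))
  ix = max(0.0, min(box[3], gt[3]) - max(box[1], gt[1]))
  inter = iy * ix
  area = lambda b: (b[2] - b[0]) * (b[3] - b[1])
  return inter / (area(box) + area(gt) - inter)

detected = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
scores = np.array([0.9, 0.6])
groundtruth = np.array([[1., 1., 11., 11.]])

best = detected[np.argmax(scores)]
correctly_localized = int(max(iou_single(best, gt) for gt in groundtruth) >= 0.5)
print(correctly_localized)  # 1 -- IoU(best, gt) = 81/119 ~= 0.68 >= 0.5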
'Labels true/false positives of detections of an image across all classes. Args: detected_boxes: A float numpy array of shape [N, 4], representing N regions of detected object regions. Each row is of the format [y_min, x_min, y_max, x_max] detected_scores: A float numpy array of shape [N, 1], representing the confidence scores of the detected N object instances. detected_class_labels: An integer numpy array of shape [N, 1], representing the class labels of the detected N object instances. groundtruth_boxes: A float numpy array of shape [M, 4], representing M regions of object instances in ground truth groundtruth_class_labels: An integer numpy array of shape [M, 1], representing M class labels of object instances in ground truth groundtruth_is_difficult_lists: A boolean numpy array of length M denoting whether a ground truth box is a difficult instance or not Returns: result_scores: A list of float numpy arrays. Each numpy array is of shape [K, 1], representing K scores detected with object class label c result_tp_fp_labels: A list of boolean numpy arrays. Each numpy array is of shape [K, 1], representing K True/False positive label of object instances detected with class label c'
def _compute_tp_fp(self, detected_boxes, detected_scores, detected_class_labels, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_lists):
result_scores = []
result_tp_fp_labels = []
for i in range(self.num_groundtruth_classes):
  gt_boxes_at_ith_class = groundtruth_boxes[groundtruth_class_labels == i, :]
  groundtruth_is_difficult_list_at_ith_class = (
      groundtruth_is_difficult_lists[groundtruth_class_labels == i])
  detected_boxes_at_ith_class = detected_boxes[detected_class_labels == i, :]
  detected_scores_at_ith_class = detected_scores[detected_class_labels == i]
  scores, tp_fp_labels = self._compute_tp_fp_for_single_class(
      detected_boxes_at_ith_class, detected_scores_at_ith_class,
      gt_boxes_at_ith_class, groundtruth_is_difficult_list_at_ith_class)
  result_scores.append(scores)
  result_tp_fp_labels.append(tp_fp_labels)
return result_scores, result_tp_fp_labels
'Labels boxes detected with the same class from the same image as tp/fp. Args: detected_boxes: A numpy array of shape [N, 4] representing detected box coordinates detected_scores: A 1-d numpy array of length N representing classification score groundtruth_boxes: A numpy array of shape [M, 4] representing ground truth box coordinates groundtruth_is_difficult_list: A boolean numpy array of length M denoting whether a ground truth box is a difficult instance or not Returns: scores: A numpy array representing the detection scores tp_fp_labels: a boolean numpy array indicating whether a detection is a true positive.'
def _compute_tp_fp_for_single_class(self, detected_boxes, detected_scores, groundtruth_boxes, groundtruth_is_difficult_list):
if detected_boxes.size == 0:
  return np.array([], dtype=float), np.array([], dtype=bool)
detected_boxlist = np_box_list.BoxList(detected_boxes)
detected_boxlist.add_field('scores', detected_scores)
detected_boxlist = np_box_list_ops.non_max_suppression(
    detected_boxlist, self.nms_max_output_boxes, self.nms_iou_threshold)

scores = detected_boxlist.get_field('scores')

if groundtruth_boxes.size == 0:
  return scores, np.zeros(detected_boxlist.num_boxes(), dtype=bool)

gt_boxlist = np_box_list.BoxList(groundtruth_boxes)
iou = np_box_list_ops.iou(detected_boxlist, gt_boxlist)

max_overlap_gt_ids = np.argmax(iou, axis=1)
is_gt_box_detected = np.zeros(gt_boxlist.num_boxes(), dtype=bool)
tp_fp_labels = np.zeros(detected_boxlist.num_boxes(), dtype=bool)
is_matched_to_difficult_box = np.zeros(detected_boxlist.num_boxes(),
                                       dtype=bool)
for i in range(detected_boxlist.num_boxes()):
  gt_id = max_overlap_gt_ids[i]
  if iou[i, gt_id] >= self.matching_iou_threshold:
    if not groundtruth_is_difficult_list[gt_id]:
      if not is_gt_box_detected[gt_id]:
        tp_fp_labels[i] = True
        is_gt_box_detected[gt_id] = True
    else:
      is_matched_to_difficult_box[i] = True
return (scores[~is_matched_to_difficult_box],
        tp_fp_labels[~is_matched_to_difficult_box])
'This function creates an image that can be used to test vis functions. It makes an image composed of four colored rectangles. Returns: colorful test numpy array image.'
def create_colorful_test_image(self):
ch255 = np.full([100, 200, 1], 255, dtype=np.uint8)
ch128 = np.full([100, 200, 1], 128, dtype=np.uint8)
ch0 = np.full([100, 200, 1], 0, dtype=np.uint8)
imr = np.concatenate((ch255, ch128, ch128), axis=2)
img = np.concatenate((ch255, ch255, ch0), axis=2)
imb = np.concatenate((ch255, ch0, ch255), axis=2)
imw = np.concatenate((ch128, ch128, ch128), axis=2)
imu = np.concatenate((imr, img), axis=1)
imd = np.concatenate((imb, imw), axis=1)
image = np.concatenate((imu, imd), axis=0)
return image
'Add ground truth info of a single image into the evaluation database. Args: image_key: sha256 key of image content groundtruth_boxes: A numpy array of shape [M, 4] representing object box coordinates[y_min, x_min, y_max, x_max] groundtruth_class_labels: A 1-d numpy array of length M representing class labels groundtruth_is_difficult_list: A length M numpy boolean array denoting whether a ground truth box is a difficult instance or not. To support the case that no boxes are difficult, it is by default set as None.'
def add_single_ground_truth_image_info(self, image_key, groundtruth_boxes, groundtruth_class_labels, groundtruth_is_difficult_list=None):
if image_key in self.groundtruth_boxes:
  logging.warn('image %s has already been added to the ground truth '
               'database.', image_key)
  return

self.groundtruth_boxes[image_key] = groundtruth_boxes
self.groundtruth_class_labels[image_key] = groundtruth_class_labels
if groundtruth_is_difficult_list is None:
  num_boxes = groundtruth_boxes.shape[0]
  groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
self.groundtruth_is_difficult_list[image_key] = (
    groundtruth_is_difficult_list.astype(dtype=bool))
self._update_ground_truth_statistics(groundtruth_class_labels,
                                     groundtruth_is_difficult_list)
'Add detected result of a single image into the evaluation database. Args: image_key: sha256 key of image content detected_boxes: A numpy array of shape [N, 4] representing detected box coordinates[y_min, x_min, y_max, x_max] detected_scores: A 1-d numpy array of length N representing classification score detected_class_labels: A 1-d numpy array of length N representing class labels Raises: ValueError: if detected_boxes, detected_scores and detected_class_labels do not have the same length.'
def add_single_detected_image_info(self, image_key, detected_boxes, detected_scores, detected_class_labels):
if (len(detected_boxes) != len(detected_scores) or
    len(detected_boxes) != len(detected_class_labels)):
  raise ValueError('detected_boxes, detected_scores and '
                   'detected_class_labels should all have same lengths. Got '
                   '[%d, %d, %d]' % (len(detected_boxes),
                                     len(detected_scores),
                                     len(detected_class_labels)))

if image_key in self.detection_keys:
  logging.warn('image %s has already been added to the detection result '
               'database', image_key)
  return

self.detection_keys.add(image_key)
if image_key in self.groundtruth_boxes:
  groundtruth_boxes = self.groundtruth_boxes[image_key]
  groundtruth_class_labels = self.groundtruth_class_labels[image_key]
  groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
      image_key]
else:
  groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
  groundtruth_class_labels = np.array([], dtype=int)
  groundtruth_is_difficult_list = np.array([], dtype=bool)
scores, tp_fp_labels, is_class_correctly_detected_in_image = (
    self.per_image_eval.compute_object_detection_metrics(
        detected_boxes, detected_scores, detected_class_labels,
        groundtruth_boxes, groundtruth_class_labels,
        groundtruth_is_difficult_list))
for i in range(self.num_class):
  self.scores_per_class[i].append(scores[i])
  self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
self.num_images_correctly_detected_per_class += (
    is_class_correctly_detected_in_image)
'Update ground truth statistics. 1. Difficult boxes are ignored when counting the number of ground truth instances as done in Pascal VOC devkit. 2. Difficult boxes are treated as normal boxes when computing CorLoc related statistics. Args: groundtruth_class_labels: An integer numpy array of length M, representing M class labels of object instances in ground truth groundtruth_is_difficult_list: A boolean numpy array of length M denoting whether a ground truth box is a difficult instance or not'
def _update_ground_truth_statistics(self, groundtruth_class_labels, groundtruth_is_difficult_list):
for class_index in range(self.num_class):
  num_gt_instances = np.sum(groundtruth_class_labels[
      ~groundtruth_is_difficult_list] == class_index)
  self.num_gt_instances_per_class[class_index] += num_gt_instances
  if np.any(groundtruth_class_labels == class_index):
    self.num_gt_imgs_per_class[class_index] += 1
'Compute evaluation result. Returns: average_precision_per_class: float numpy array of average precision for each class. mean_ap: mean average precision of all classes, float scalar precisions_per_class: List of precisions, each precision is a float numpy array recalls_per_class: List of recalls, each recall is a float numpy array corloc_per_class: float numpy array of CorLoc scores, one per class mean_corloc: mean CorLoc score over all classes, float scalar'
def evaluate(self):
if (self.num_gt_instances_per_class == 0).any():
  logging.warn('The following classes have no ground truth examples: %s',
               np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)))
for class_index in range(self.num_class):
  if self.num_gt_instances_per_class[class_index] == 0:
    continue
  scores = np.concatenate(self.scores_per_class[class_index])
  tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
  precision, recall = metrics.compute_precision_recall(
      scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
  self.precisions_per_class.append(precision)
  self.recalls_per_class.append(recall)
  average_precision = metrics.compute_average_precision(precision, recall)
  self.average_precision_per_class[class_index] = average_precision

self.corloc_per_class = metrics.compute_cor_loc(
    self.num_gt_imgs_per_class,
    self.num_images_correctly_detected_per_class)

mean_ap = np.nanmean(self.average_precision_per_class)
mean_corloc = np.nanmean(self.corloc_per_class)
return (self.average_precision_per_class, mean_ap,
        self.precisions_per_class, self.recalls_per_class,
        self.corloc_per_class, mean_corloc)
'Tests if a good pyramid image is created.'
def test_diagonal_gradient_image(self):
pyramid_image = test_utils.create_diagonal_gradient_image(3, 4, 2)

expected_first_channel = np.array([[3, 2, 1, 0],
                                   [4, 3, 2, 1],
                                   [5, 4, 3, 2]], dtype=np.float32)
self.assertAllEqual(np.squeeze(pyramid_image[:, :, 0]),
                    expected_first_channel)

expected_image = np.array([[[3, 30], [2, 20], [1, 10], [0, 0]],
                           [[4, 40], [3, 30], [2, 20], [1, 10]],
                           [[5, 50], [4, 40], [3, 30], [2, 20]]],
                          dtype=np.float32)
self.assertAllEqual(pyramid_image, expected_image)
'Tests if valid random boxes are created.'
def test_random_boxes(self):
num_boxes = 1000
max_height = 3
max_width = 5
boxes = test_utils.create_random_boxes(num_boxes, max_height, max_width)

true_column = np.ones(shape=num_boxes) == 1
self.assertAllEqual(boxes[:, 0] < boxes[:, 2], true_column)
self.assertAllEqual(boxes[:, 1] < boxes[:, 3], true_column)

self.assertTrue(boxes[:, 0].min() >= 0)
self.assertTrue(boxes[:, 1].min() >= 0)
self.assertTrue(boxes[:, 2].max() <= max_height)
self.assertTrue(boxes[:, 3].max() <= max_width)
'Constructs box collection. Args: data: a numpy array of shape [N, 4] representing box coordinates Raises: ValueError: if bbox data is not a numpy array ValueError: if invalid dimensions for bbox data'
def __init__(self, data):
if (not isinstance(data, np.ndarray)): raise ValueError('data must be a numpy array.') if ((len(data.shape) != 2) or (data.shape[1] != 4)): raise ValueError('Invalid dimensions for box data.') if ((data.dtype != np.float32) and (data.dtype != np.float64)): raise ValueError('Invalid data type for box data: float is required.') if (not self._is_valid_boxes(data)): raise ValueError('Invalid box data. data must be a numpy array of N*[y_min, x_min, y_max, x_max]') self.data = {'boxes': data}
'Return number of boxes held in collections.'
def num_boxes(self):
return self.data['boxes'].shape[0]
'Return all non-box fields.'
def get_extra_fields(self):
return [k for k in self.data.keys() if (k != 'boxes')]
'Add data to a specified field. Args: field: a string parameter used to specify a related field to be accessed. field_data: a numpy array of [N, ...] representing the data associated with the field. Raises: ValueError: if the field already exists or the dimension of the field data does not match the number of boxes.'
def add_field(self, field, field_data):
if self.has_field(field): raise ValueError((('Field ' + field) + ' already exists')) if ((len(field_data.shape) < 1) or (field_data.shape[0] != self.num_boxes())): raise ValueError('Invalid dimensions for field data') self.data[field] = field_data
'Convenience function for accessing box coordinates. Returns: a numpy array of shape [N, 4] representing box corners'
def get(self):
return self.get_field('boxes')
'Accesses data associated with the specified field in the box collection. Args: field: a string parameter used to specify a related field to be accessed. Returns: a numpy 1-d array representing data of an associated field Raises: ValueError: if invalid field'
def get_field(self, field):
if (not self.has_field(field)): raise ValueError('field {} does not exist'.format(field)) return self.data[field]
'Get corner coordinates of boxes. Returns: a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]'
def get_coordinates(self):
box_coordinates = self.get() y_min = box_coordinates[:, 0] x_min = box_coordinates[:, 1] y_max = box_coordinates[:, 2] x_max = box_coordinates[:, 3] return [y_min, x_min, y_max, x_max]
'Check whether data fulfills the format of N*[ymin, xmin, ymax, xmax]. Args: data: a numpy array of shape [N, 4] representing box coordinates Returns: a boolean indicating whether all ymax of boxes are equal or greater than ymin, and all xmax of boxes are equal or greater than xmin.'
def _is_valid_boxes(self, data):
if (data.shape[0] > 0): for i in xrange(data.shape[0]): if ((data[(i, 0)] > data[(i, 2)]) or (data[(i, 1)] > data[(i, 3)])): return False return True
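The per-row loop above can also be written as a single vectorized comparison; the following is only an equivalent sketch under the same [N, 4] = N*[y_min, x_min, y_max, x_max] layout assumption, not a replacement taken from the source.

import numpy as np

def is_valid_boxes_vectorized(data):
    # An empty array is treated as valid, matching the loop-based check.
    if data.shape[0] == 0:
        return True
    return bool(np.all((data[:, 2] >= data[:, 0]) & (data[:, 3] >= data[:, 1])))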
'Tests meshgrid op with vectors, for which it should match numpy.'
def test_meshgrid_numpy_comparison(self):
x = np.arange(4) y = np.arange(6) (exp_xgrid, exp_ygrid) = np.meshgrid(x, y) (xgrid, ygrid) = ops.meshgrid(x, y) with self.test_session() as sess: (xgrid_output, ygrid_output) = sess.run([xgrid, ygrid]) self.assertAllEqual(xgrid_output, exp_xgrid) self.assertAllEqual(ygrid_output, exp_ygrid)
'Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. first_stage_features_stride: Output stride of extracted RPN feature map. reuse_weights: Whether to reuse variables. Default is None. weight_decay: float weight decay for feature extractor (default: 0.0).'
def __init__(self, is_training, first_stage_features_stride, reuse_weights=None, weight_decay=0.0):
self._is_training = is_training self._first_stage_features_stride = first_stage_features_stride self._reuse_weights = reuse_weights self._weight_decay = weight_decay
'Feature-extractor specific preprocessing (minus image resizing).'
@abstractmethod def preprocess(self, resized_inputs):
pass
'Extracts first stage RPN features. This function is responsible for extracting feature maps from preprocessed images. These features are used by the region proposal network (RPN) to predict proposals. Args: preprocessed_inputs: A [batch, height, width, channels] float tensor representing a batch of images. scope: A scope name. Returns: rpn_feature_map: A tensor with shape [batch, height, width, depth]'
def extract_proposal_features(self, preprocessed_inputs, scope):
with tf.variable_scope(scope, values=[preprocessed_inputs]): return self._extract_proposal_features(preprocessed_inputs, scope)
'Extracts first stage RPN features, to be overridden.'
@abstractmethod def _extract_proposal_features(self, preprocessed_inputs, scope):
pass
'Extracts second stage box classifier features. Args: proposal_feature_maps: A 4-D float tensor with shape [batch_size * self.max_num_proposals, crop_height, crop_width, depth] representing the feature map cropped to each proposal. scope: A scope name. Returns: proposal_classifier_features: A 4-D float tensor with shape [batch_size * self.max_num_proposals, height, width, depth] representing box classifier features for each proposal.'
def extract_box_classifier_features(self, proposal_feature_maps, scope):
with tf.variable_scope(scope, values=[proposal_feature_maps]): return self._extract_box_classifier_features(proposal_feature_maps, scope)
'Extracts second stage box classifier features, to be overridden.'
@abstractmethod def _extract_box_classifier_features(self, proposal_feature_maps, scope):
pass
'Returns callable for loading a checkpoint into the tensorflow graph. Args: checkpoint_path: path to checkpoint to restore. first_stage_feature_extractor_scope: A scope name for the first stage feature extractor. second_stage_feature_extractor_scope: A scope name for the second stage feature extractor. Returns: a callable which takes a tf.Session as input and loads a checkpoint when run.'
def restore_from_classification_checkpoint_fn(self, checkpoint_path, first_stage_feature_extractor_scope, second_stage_feature_extractor_scope):
variables_to_restore = {} for variable in tf.global_variables(): for scope_name in [first_stage_feature_extractor_scope, second_stage_feature_extractor_scope]: if variable.op.name.startswith(scope_name): var_name = variable.op.name.replace((scope_name + '/'), '') variables_to_restore[var_name] = variable variables_to_restore = variables_helper.get_variables_available_in_checkpoint(variables_to_restore, checkpoint_path) saver = tf.train.Saver(variables_to_restore) def restore(sess): saver.restore(sess, checkpoint_path) return restore
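A hedged usage sketch of the callable returned above: the `my_extractor` instance, the scope strings, and the checkpoint path are placeholders and assumptions for illustration, not values taken from the source.

import tensorflow as tf

# `my_extractor` stands in for some concrete FasterRCNNFeatureExtractor subclass instance.
init_fn = my_extractor.restore_from_classification_checkpoint_fn(
    checkpoint_path='/path/to/classification.ckpt',
    first_stage_feature_extractor_scope='FirstStageFeatureExtractor',
    second_stage_feature_extractor_scope='SecondStageFeatureExtractor')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)  # restores the checkpoint weights into the matching variables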
'FasterRCNNMetaArch Constructor. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. num_classes: Number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). image_resizer_fn: A callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. See builders/image_resizer_builder.py. feature_extractor: A FasterRCNNFeatureExtractor object. first_stage_only: Whether to construct only the Region Proposal Network (RPN) part of the model. first_stage_anchor_generator: An anchor_generator.AnchorGenerator object (note that currently we only support grid_anchor_generator.GridAnchorGenerator objects) first_stage_atrous_rate: A single integer indicating the atrous rate for the single convolution op which is applied to the `rpn_features_to_crop` tensor to obtain a tensor to be used for box prediction. Some feature extractors optionally allow for producing feature maps computed at denser resolutions. The atrous rate is used to compensate for the denser feature maps by using an effectively larger receptive field. (This should typically be set to 1). first_stage_box_predictor_arg_scope: Slim arg_scope for conv2d, separable_conv2d and fully_connected ops for the RPN box predictor. first_stage_box_predictor_kernel_size: Kernel size to use for the convolution op just prior to RPN box predictions. first_stage_box_predictor_depth: Output depth for the convolution op just prior to RPN box predictions. first_stage_minibatch_size: The "batch size" to use for computing the objectness and location loss of the region proposal network. This "batch size" refers to the number of anchors selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. first_stage_positive_balance_fraction: Fraction of positive examples per image for the RPN. The recommended value for Faster RCNN is 0.5. first_stage_nms_score_threshold: Score threshold for non max suppression for the Region Proposal Network (RPN). This value is expected to be in [0, 1] as it is applied directly after a softmax transformation. The recommended value for Faster R-CNN is 0. first_stage_nms_iou_threshold: The Intersection Over Union (IOU) threshold for performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_max_proposals: Maximum number of boxes to retain after performing Non-Max Suppression (NMS) on the boxes predicted by the Region Proposal Network (RPN). first_stage_localization_loss_weight: A float first_stage_objectness_loss_weight: A float initial_crop_size: A single integer indicating the output size (width and height are set to be the same) of the initial bilinear interpolation based cropping during ROI pooling. maxpool_kernel_size: A single integer indicating the kernel size of the max pool op on the cropped feature map during ROI pooling. maxpool_stride: A single integer indicating the stride of the max pool op on the cropped feature map during ROI pooling. second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for the second stage. 
second_stage_batch_size: The batch size used for computing the classification and refined location loss of the box classifier. This "batch size" refers to the number of proposals selected as contributing to the loss function for any given image within the image batch and is only called "batch_size" due to terminology from the Faster R-CNN paper. second_stage_balance_fraction: Fraction of positive examples to use per image for the box classifier. The recommended value for Faster RCNN is 0.25. second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores`, optional `clip_window` and optional (kwarg) `mask` inputs (with all other inputs already set) and returns a dictionary containing tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes`, `num_detections`, and (optionally) `detection_masks`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. second_stage_score_conversion_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. second_stage_localization_loss_weight: A float second_stage_classification_loss_weight: A float hard_example_miner: A losses.HardExampleMiner object (can be None). parallel_iterations: (Optional) The number of iterations allowed to run in parallel for calls to tf.map_fn. Raises: ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` ValueError: If first_stage_anchor_generator is not of type grid_anchor_generator.GridAnchorGenerator.'
def __init__(self, is_training, num_classes, image_resizer_fn, feature_extractor, first_stage_only, first_stage_anchor_generator, first_stage_atrous_rate, first_stage_box_predictor_arg_scope, first_stage_box_predictor_kernel_size, first_stage_box_predictor_depth, first_stage_minibatch_size, first_stage_positive_balance_fraction, first_stage_nms_score_threshold, first_stage_nms_iou_threshold, first_stage_max_proposals, first_stage_localization_loss_weight, first_stage_objectness_loss_weight, initial_crop_size, maxpool_kernel_size, maxpool_stride, second_stage_mask_rcnn_box_predictor, second_stage_batch_size, second_stage_balance_fraction, second_stage_non_max_suppression_fn, second_stage_score_conversion_fn, second_stage_localization_loss_weight, second_stage_classification_loss_weight, hard_example_miner, parallel_iterations=16):
super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes) if (second_stage_batch_size > first_stage_max_proposals): raise ValueError('second_stage_batch_size should be no greater than first_stage_max_proposals.') if (not isinstance(first_stage_anchor_generator, grid_anchor_generator.GridAnchorGenerator)): raise ValueError('first_stage_anchor_generator must be of type grid_anchor_generator.GridAnchorGenerator.') self._is_training = is_training self._image_resizer_fn = image_resizer_fn self._feature_extractor = feature_extractor self._first_stage_only = first_stage_only unmatched_cls_target = tf.constant(([1] + (self._num_classes * [0])), dtype=tf.float32) self._proposal_target_assigner = target_assigner.create_target_assigner('FasterRCNN', 'proposal') self._detector_target_assigner = target_assigner.create_target_assigner('FasterRCNN', 'detection', unmatched_cls_target=unmatched_cls_target) self._box_coder = self._proposal_target_assigner.box_coder self._first_stage_anchor_generator = first_stage_anchor_generator self._first_stage_atrous_rate = first_stage_atrous_rate self._first_stage_box_predictor_arg_scope = first_stage_box_predictor_arg_scope self._first_stage_box_predictor_kernel_size = first_stage_box_predictor_kernel_size self._first_stage_box_predictor_depth = first_stage_box_predictor_depth self._first_stage_minibatch_size = first_stage_minibatch_size self._first_stage_sampler = sampler.BalancedPositiveNegativeSampler(positive_fraction=first_stage_positive_balance_fraction) self._first_stage_box_predictor = box_predictor.ConvolutionalBoxPredictor(self._is_training, num_classes=1, conv_hyperparams=self._first_stage_box_predictor_arg_scope, min_depth=0, max_depth=0, num_layers_before_predictor=0, use_dropout=False, dropout_keep_prob=1.0, kernel_size=1, box_code_size=self._box_coder.code_size) self._first_stage_nms_score_threshold = first_stage_nms_score_threshold self._first_stage_nms_iou_threshold = first_stage_nms_iou_threshold self._first_stage_max_proposals = first_stage_max_proposals self._first_stage_localization_loss = losses.WeightedSmoothL1LocalizationLoss(anchorwise_output=True) self._first_stage_objectness_loss = losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True) self._first_stage_loc_loss_weight = first_stage_localization_loss_weight self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight self._initial_crop_size = initial_crop_size self._maxpool_kernel_size = maxpool_kernel_size self._maxpool_stride = maxpool_stride self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor self._second_stage_batch_size = second_stage_batch_size self._second_stage_sampler = sampler.BalancedPositiveNegativeSampler(positive_fraction=second_stage_balance_fraction) self._second_stage_nms_fn = second_stage_non_max_suppression_fn self._second_stage_score_conversion_fn = second_stage_score_conversion_fn self._second_stage_localization_loss = losses.WeightedSmoothL1LocalizationLoss(anchorwise_output=True) self._second_stage_classification_loss = losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True) self._second_stage_loc_loss_weight = second_stage_localization_loss_weight self._second_stage_cls_loss_weight = second_stage_classification_loss_weight self._hard_example_miner = hard_example_miner self._parallel_iterations = parallel_iterations
'Max number of proposals (to pad to) for each image in the input batch. At training time, this is set to be the `second_stage_batch_size` if hard example miner is not configured, else it is set to `first_stage_max_proposals`. At inference time, this is always set to `first_stage_max_proposals`. Returns: A positive integer.'
@property def max_num_proposals(self):
if (self._is_training and (not self._hard_example_miner)): return self._second_stage_batch_size return self._first_stage_max_proposals
'Feature-extractor specific preprocessing. See base class. For Faster R-CNN, we perform image resizing in the base class --- each class subclassing FasterRCNNMetaArch is responsible for any additional preprocessing (e.g., scaling pixel values to be in [-1, 1]). Args: inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images. Raises: ValueError: if inputs tensor does not have type tf.float32'
def preprocess(self, inputs):
if (inputs.dtype is not tf.float32): raise ValueError('`preprocess` expects a tf.float32 tensor') with tf.name_scope('Preprocessor'): resized_inputs = tf.map_fn(self._image_resizer_fn, elems=inputs, dtype=tf.float32, parallel_iterations=self._parallel_iterations) return self._feature_extractor.preprocess(resized_inputs)
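The preprocess body maps `self._image_resizer_fn` over each image in the batch; that callable is expected to take a rank-3 image tensor and return a rank-3 image tensor. A minimal sketch of such a callable, assuming bilinear resizing to a fixed, arbitrarily chosen 300x300 shape (TF 1.x API):

import tensorflow as tf

def fixed_shape_resizer(image, new_height=300, new_width=300):
    # image: rank-3 [height, width, channels] float tensor.
    # Returns a rank-3 [new_height, new_width, channels] float tensor.
    return tf.image.resize_images(image, [new_height, new_width],
                                  method=tf.image.ResizeMethod.BILINEAR)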
'Predicts unpostprocessed tensors from input tensor. This function takes an input batch of images and runs it through the forward pass of the network to yield "raw" un-postprocessed predictions. If `first_stage_only` is True, this function only returns first stage RPN predictions (un-postprocessed). Otherwise it returns both first stage RPN predictions as well as second stage box classifier predictions. Other remarks: + Anchor pruning vs. clipping: following the recommendation of the Faster R-CNN paper, we prune anchors that venture outside the image window at training time and clip anchors to the image window at inference time. + Proposal padding: as described at the top of the file, proposals are padded to self._max_num_proposals and flattened so that proposals from all images within the input batch are arranged along the same batch dimension. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) rpn_box_predictor_features: A 4-D float32 tensor with shape [batch_size, height, width, depth] to be used for predicting proposal boxes and corresponding objectness scores. 2) rpn_features_to_crop: A 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features to crop using the proposal boxes predicted by the RPN. 3) image_shape: a 1-D tensor of shape [4] representing the input image shape. 4) rpn_box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. 5) rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN (in absolute coordinates). Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. (and if first_stage_only=False): 7) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, 4] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals 8) class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 9) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 10) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in absolute coordinates). 11) mask_predictions: (optional) a 4-D tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask predictions.'
def predict(self, preprocessed_inputs):
(rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist, image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs) (rpn_box_encodings, rpn_objectness_predictions_with_background) = self._predict_rpn_proposals(rpn_box_predictor_features) clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]])) if self._is_training: (rpn_box_encodings, rpn_objectness_predictions_with_background, anchors_boxlist) = self._remove_invalid_anchors_and_predictions(rpn_box_encodings, rpn_objectness_predictions_with_background, anchors_boxlist, clip_window) else: anchors_boxlist = box_list_ops.clip_to_window(anchors_boxlist, clip_window) anchors = anchors_boxlist.get() prediction_dict = {'rpn_box_predictor_features': rpn_box_predictor_features, 'rpn_features_to_crop': rpn_features_to_crop, 'image_shape': image_shape, 'rpn_box_encodings': rpn_box_encodings, 'rpn_objectness_predictions_with_background': rpn_objectness_predictions_with_background, 'anchors': anchors} if (not self._first_stage_only): prediction_dict.update(self._predict_second_stage(rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape)) return prediction_dict
'Predicts the output tensors from second stage of Faster R-CNN. Args: rpn_box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes. rpn_objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). rpn_features_to_crop: A 4-D float32 tensor with shape [batch_size, height, width, depth] representing image features to crop using the proposal boxes predicted by the RPN. anchors: 2-D float tensor of shape [num_anchors, self._box_coder.code_size]. image_shape: A 1-D int32 tensor of shape [4] containing the image shape. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, 4] representing predicted (final) refined box encodings, where total_num_proposals=batch_size*self._max_num_proposals 2) class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors, where total_num_proposals=batch_size*self._max_num_proposals. Note that this tensor *includes* background class predictions (at class index 0). 3) num_proposals: An int32 tensor of shape [batch_size] representing the number of proposals generated by the RPN. `num_proposals` allows us to keep track of which entries are to be treated as zero paddings and which are not since we always pad the number of proposals to be `self.max_num_proposals` for each image. 4) proposal_boxes: A float32 tensor of shape [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes (in absolute coordinates). 5) mask_predictions: (optional) a 4-D tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask predictions.'
def _predict_second_stage(self, rpn_box_encodings, rpn_objectness_predictions_with_background, rpn_features_to_crop, anchors, image_shape):
(proposal_boxes_normalized, _, num_proposals) = self._postprocess_rpn(rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, image_shape) flattened_proposal_feature_maps = self._compute_second_stage_input_feature_maps(rpn_features_to_crop, proposal_boxes_normalized) box_classifier_features = self._feature_extractor.extract_box_classifier_features(flattened_proposal_feature_maps, scope=self.second_stage_feature_extractor_scope) box_predictions = self._mask_rcnn_box_predictor.predict(box_classifier_features, num_predictions_per_location=1, scope=self.second_stage_box_predictor_scope) refined_box_encodings = tf.squeeze(box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.squeeze(box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) absolute_proposal_boxes = ops.normalized_to_image_coordinates(proposal_boxes_normalized, image_shape, self._parallel_iterations) prediction_dict = {'refined_box_encodings': refined_box_encodings, 'class_predictions_with_background': class_predictions_with_background, 'num_proposals': num_proposals, 'proposal_boxes': absolute_proposal_boxes} return prediction_dict
'Extracts RPN features. This function extracts two feature maps: a feature map to be directly fed to a box predictor (to predict location and objectness scores for proposals) and a feature map from which to crop regions which will then be sent to the second stage box classifier. Args: preprocessed_inputs: a [batch, height, width, channels] image tensor. Returns: rpn_box_predictor_features: A 4-D float32 tensor with shape [batch, height, width, depth] to be used for predicting proposal boxes and corresponding objectness scores. rpn_features_to_crop: A 4-D float32 tensor with shape [batch, height, width, depth] representing image features to crop using the proposals boxes. anchors: A BoxList representing anchors (for the RPN) in absolute coordinates. image_shape: A 1-D tensor representing the input image shape.'
def _extract_rpn_feature_maps(self, preprocessed_inputs):
image_shape = tf.shape(preprocessed_inputs) rpn_features_to_crop = self._feature_extractor.extract_proposal_features(preprocessed_inputs, scope=self.first_stage_feature_extractor_scope) feature_map_shape = tf.shape(rpn_features_to_crop) anchors = self._first_stage_anchor_generator.generate([(feature_map_shape[1], feature_map_shape[2])]) with slim.arg_scope(self._first_stage_box_predictor_arg_scope): kernel_size = self._first_stage_box_predictor_kernel_size rpn_box_predictor_features = slim.conv2d(rpn_features_to_crop, self._first_stage_box_predictor_depth, kernel_size=[kernel_size, kernel_size], rate=self._first_stage_atrous_rate, activation_fn=tf.nn.relu6) return (rpn_box_predictor_features, rpn_features_to_crop, anchors, image_shape)
'Adds box predictors to RPN feature map to predict proposals. Note resulting tensors will not have been postprocessed. Args: rpn_box_predictor_features: A 4-D float32 tensor with shape [batch, height, width, depth] to be used for predicting proposal boxes and corresponding objectness scores. Returns: box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). Raises: RuntimeError: if the anchor generator generates anchors corresponding to multiple feature maps. We currently assume that a single feature map is generated for the RPN.'
def _predict_rpn_proposals(self, rpn_box_predictor_features):
num_anchors_per_location = self._first_stage_anchor_generator.num_anchors_per_location() if (len(num_anchors_per_location) != 1): raise RuntimeError('anchor_generator is expected to generate anchors corresponding to a single feature map.') box_predictions = self._first_stage_box_predictor.predict(rpn_box_predictor_features, num_anchors_per_location[0], scope=self.first_stage_box_predictor_scope) box_encodings = box_predictions[box_predictor.BOX_ENCODINGS] objectness_predictions_with_background = box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND] return (tf.squeeze(box_encodings, axis=2), objectness_predictions_with_background)
'Removes anchors that (partially) fall outside an image. Also removes associated box encodings and objectness predictions. Args: box_encodings: 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted boxes. objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, 2] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN) in absolute coordinates. clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax] extent of the window to clip/prune to. Returns: box_encodings: 3-D float tensor of shape [batch_size, num_valid_anchors, self._box_coder.code_size] containing predicted boxes, where num_valid_anchors <= num_anchors objectness_predictions_with_background: 3-D float tensor of shape [batch_size, num_valid_anchors, 2] containing class predictions (logits) for each of the anchors, where num_valid_anchors <= num_anchors. Note that this tensor *includes* background class predictions (at class index 0). anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in absolute coordinates.'
def _remove_invalid_anchors_and_predictions(self, box_encodings, objectness_predictions_with_background, anchors_boxlist, clip_window):
(pruned_anchors_boxlist, keep_indices) = box_list_ops.prune_outside_window(anchors_boxlist, clip_window) def _batch_gather_kept_indices(predictions_tensor): return tf.map_fn(partial(tf.gather, indices=keep_indices), elems=predictions_tensor, dtype=tf.float32, parallel_iterations=self._parallel_iterations, back_prop=True) return (_batch_gather_kept_indices(box_encodings), _batch_gather_kept_indices(objectness_predictions_with_background), pruned_anchors_boxlist)
'Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor. Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape [A * B, ..., depth]. Args: inputs: A float tensor with shape [A, B, ..., depth]. Note that the first two and last dimensions must be statically defined. Returns: A float tensor with shape [A * B, ..., depth] (where the first and last dimensions are statically defined).'
def _flatten_first_two_dimensions(self, inputs):
inputs_shape = inputs.get_shape().as_list() flattened_shape = tf.concat([[(inputs_shape[0] * inputs_shape[1])], tf.shape(inputs)[2:(-1)], [inputs_shape[(-1)]]], 0) return tf.reshape(inputs, flattened_shape)
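The reshape above collapses the leading two dimensions and keeps everything else; a plain numpy illustration of the same shape transformation (the concrete sizes are arbitrary examples):

import numpy as np

x = np.zeros((2, 8, 7, 7, 512), dtype=np.float32)            # [A, B, ..., depth]
flattened = x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
assert flattened.shape == (16, 7, 7, 512)                     # [A * B, ..., depth]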
'Convert prediction tensors to final detections. This function converts raw prediction tensors to final detection results. See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_converter is used, then scores are remapped (and may thus have a different interpretation). If first_stage_only=True, the returned results represent proposals from the first stage RPN and are padded to have self.max_num_proposals for each image; otherwise, the results can be interpreted as multiclass detections from the full two-stage model and are padded to self._max_detections. Args: prediction_dict: a dictionary holding prediction tensors (see the documentation for the predict method). If first_stage_only=True, we expect prediction_dict to contain `rpn_box_encodings`, `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, `image_shape`, and `anchors` fields. Otherwise we expect prediction_dict to additionally contain `refined_box_encodings`, `class_predictions_with_background`, `num_proposals`, `proposal_boxes` and, optionally, `mask_predictions` fields. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detections, 4] detection_scores: [batch, max_detections] detection_classes: [batch, max_detections] (this entry is only created if rpn_mode=False) num_detections: [batch]'
def postprocess(self, prediction_dict):
with tf.name_scope('FirstStagePostprocessor'): image_shape = prediction_dict['image_shape'] if self._first_stage_only: (proposal_boxes, proposal_scores, num_proposals) = self._postprocess_rpn(prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], image_shape) return {'detection_boxes': proposal_boxes, 'detection_scores': proposal_scores, 'num_detections': num_proposals} with tf.name_scope('SecondStagePostprocessor'): mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS) detections_dict = self._postprocess_box_classifier(prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], image_shape, mask_predictions=mask_predictions) return detections_dict
'Converts first stage prediction tensors from the RPN to proposals. This function decodes the raw RPN predictions and runs non-max suppression on the result. Note that the behavior of this function is slightly modified during training --- specifically, we stop the gradient from passing through the proposal boxes and we only return a balanced sampled subset of proposals with size `second_stage_batch_size`. Args: rpn_box_encodings_batch: A 3-D float32 tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted proposal box encodings. rpn_objectness_predictions_with_background_batch: A 3-D float tensor of shape [batch_size, num_anchors, 2] containing objectness predictions (logits) for each of the anchors with 0 corresponding to background and 1 corresponding to object. anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN. Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. image_shape: A 1-D tensor representing the input image shape. Returns: proposal_boxes: A float tensor with shape [batch_size, max_num_proposals, 4] representing the (potentially zero padded) proposal boxes for all images in the batch. These boxes are represented as normalized coordinates. proposal_scores: A float tensor with shape [batch_size, max_num_proposals] representing the (potentially zero padded) proposal objectness scores for all images in the batch. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch.'
def _postprocess_rpn(self, rpn_box_encodings_batch, rpn_objectness_predictions_with_background_batch, anchors, image_shape):
clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]])) if self._is_training: (groundtruth_boxlists, groundtruth_classes_with_background_list) = self._format_groundtruth_data(image_shape) proposal_boxes_list = [] proposal_scores_list = [] num_proposals_list = [] for (batch_index, (rpn_box_encodings, rpn_objectness_predictions_with_background)) in enumerate(zip(tf.unstack(rpn_box_encodings_batch), tf.unstack(rpn_objectness_predictions_with_background_batch))): decoded_boxes = self._box_coder.decode(rpn_box_encodings, box_list.BoxList(anchors)) objectness_scores = tf.unstack(tf.nn.softmax(rpn_objectness_predictions_with_background), axis=1)[1] proposal_boxlist = post_processing.multiclass_non_max_suppression(tf.expand_dims(decoded_boxes.get(), 1), tf.expand_dims(objectness_scores, 1), self._first_stage_nms_score_threshold, self._first_stage_nms_iou_threshold, self._first_stage_max_proposals, clip_window=clip_window) if self._is_training: proposal_boxlist.set(tf.stop_gradient(proposal_boxlist.get())) if (not self._hard_example_miner): proposal_boxlist = self._sample_box_classifier_minibatch(proposal_boxlist, groundtruth_boxlists[batch_index], groundtruth_classes_with_background_list[batch_index]) normalized_proposals = box_list_ops.to_normalized_coordinates(proposal_boxlist, image_shape[1], image_shape[2], check_range=False) padded_proposals = box_list_ops.pad_or_clip_box_list(normalized_proposals, num_boxes=self.max_num_proposals) proposal_boxes_list.append(padded_proposals.get()) proposal_scores_list.append(padded_proposals.get_field(fields.BoxListFields.scores)) num_proposals_list.append(tf.minimum(normalized_proposals.num_boxes(), self.max_num_proposals)) return (tf.stack(proposal_boxes_list), tf.stack(proposal_scores_list), tf.stack(num_proposals_list))
'Helper function for preparing groundtruth data for target assignment. In order to be consistent with the model.DetectionModel interface, groundtruth boxes are specified in normalized coordinates and classes are specified as label indices with no assumed background category. To prepare for target assignment, we: 1) convert boxes to absolute coordinates, 2) add a background class at class index 0 Args: image_shape: A 1-D int32 tensor of shape [4] representing the shape of the input image batch. Returns: groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class.'
def _format_groundtruth_data(self, image_shape):
groundtruth_boxlists = [box_list_ops.to_absolute_coordinates(box_list.BoxList(boxes), image_shape[1], image_shape[2]) for boxes in self.groundtruth_lists(fields.BoxListFields.boxes)] groundtruth_classes_with_background_list = [tf.to_float(tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT')) for one_hot_encoding in self.groundtruth_lists(fields.BoxListFields.classes)] return (groundtruth_boxlists, groundtruth_classes_with_background_list)
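The body above turns K-class one-hot labels into (K+1)-class labels by padding a background column at index 0. A numpy illustration of that padding on a toy 2-class example (the toy values are assumptions for the example):

import numpy as np

one_hot = np.array([[1, 0],    # box 0 is class 0
                    [0, 1]],   # box 1 is class 1
                   dtype=np.float32)
# Pad one zero column on the left: original class i moves to index i + 1 and
# index 0 becomes the background column.
with_background = np.pad(one_hot, ((0, 0), (1, 0)), mode='constant')
assert with_background.tolist() == [[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]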
'Samples a mini-batch of proposals to be sent to the box classifier. Helper function for self._postprocess_rpn. Args: proposal_boxlist: A BoxList containing K proposal boxes in absolute coordinates. groundtruth_boxlist: A BoxList containing N groundtruth object boxes in absolute coordinates. groundtruth_classes_with_background: A tensor with shape `[N, self.num_classes + 1]` representing groundtruth classes. The classes are assumed to be k-hot encoded, and include background as the zero-th class. Returns: a BoxList containing sampled proposals.'
def _sample_box_classifier_minibatch(self, proposal_boxlist, groundtruth_boxlist, groundtruth_classes_with_background):
(cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(proposal_boxlist, groundtruth_boxlist, groundtruth_classes_with_background) cls_weights += tf.to_float(tf.equal(tf.reduce_sum(cls_weights), 0)) positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0) sampled_indices = self._second_stage_sampler.subsample(tf.cast(cls_weights, tf.bool), self._second_stage_batch_size, positive_indicator) return box_list_ops.boolean_mask(proposal_boxlist, sampled_indices)
'Crops to a set of proposals from the feature map for a batch of images. Helper function for self._postprocess_rpn. This function calls `tf.image.crop_and_resize` to create the feature map to be passed to the second stage box classifier for each proposal. Args: features_to_crop: A float32 tensor with shape [batch_size, height, width, depth] proposal_boxes_normalized: A float32 tensor with shape [batch_size, num_proposals, box_code_size] containing proposal boxes in normalized coordinates. Returns: A float32 tensor with shape [K, new_height, new_width, depth].'
def _compute_second_stage_input_feature_maps(self, features_to_crop, proposal_boxes_normalized):
def get_box_inds(proposals): proposals_shape = proposals.get_shape().as_list() if any(((dim is None) for dim in proposals_shape)): proposals_shape = tf.shape(proposals) ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32) multiplier = tf.expand_dims(tf.range(start=0, limit=proposals_shape[0]), 1) return tf.reshape((ones_mat * multiplier), [(-1)]) cropped_regions = tf.image.crop_and_resize(features_to_crop, self._flatten_first_two_dimensions(proposal_boxes_normalized), get_box_inds(proposal_boxes_normalized), (self._initial_crop_size, self._initial_crop_size)) return slim.max_pool2d(cropped_regions, [self._maxpool_kernel_size, self._maxpool_kernel_size], stride=self._maxpool_stride)
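The inner get_box_inds helper builds the `box_ind` argument of tf.image.crop_and_resize: every proposal from image i must index batch element i. For statically known shapes this is simply the batch indices repeated once per proposal; a numpy sketch of the same mapping (sizes are arbitrary examples):

import numpy as np

batch_size, num_proposals = 2, 3
# Equivalent to flattening the ones_mat * multiplier matrix used above.
box_ind = np.repeat(np.arange(batch_size), num_proposals)
assert box_ind.tolist() == [0, 0, 0, 1, 1, 1]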
'Converts predictions from the second stage box classifier to detections. Args: refined_box_encodings: a 3-D tensor with shape [total_num_padded_proposals, num_classes, 4] representing predicted (final) refined box encodings. class_predictions_with_background: a 3-D tensor with shape [total_num_padded_proposals, num_classes + 1] containing class predictions (logits) for each of the proposals. Note that this tensor *includes* background class predictions (at class index 0). proposal_boxes: [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. image_shape: a 1-D tensor representing the input image shape. mask_predictions: (optional) a 4-D tensor with shape [total_num_padded_proposals, num_classes, mask_height, mask_width] containing instance mask predictions. mask_threshold: a scalar threshold determining which mask values are rounded to 0 or 1. Returns: A dictionary containing: `detection_boxes`: [batch, max_detections, 4] `detection_scores`: [batch, max_detections] `detection_classes`: [batch, max_detections] `num_detections`: [batch] `detection_masks`: (optional) [batch, max_detections, mask_height, mask_width]'
def _postprocess_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, image_shape, mask_predictions=None, mask_threshold=0.5):
refined_box_encodings_batch = tf.reshape(refined_box_encodings, [(-1), self.max_num_proposals, self.num_classes, self._box_coder.code_size]) class_predictions_with_background_batch = tf.reshape(class_predictions_with_background, [(-1), self.max_num_proposals, (self.num_classes + 1)]) refined_decoded_boxes_batch = self._batch_decode_refined_boxes(refined_box_encodings_batch, proposal_boxes) class_predictions_with_background_batch = self._second_stage_score_conversion_fn(class_predictions_with_background_batch) class_predictions_batch = tf.reshape(tf.slice(class_predictions_with_background_batch, [0, 0, 1], [(-1), (-1), (-1)]), [(-1), self.max_num_proposals, self.num_classes]) clip_window = tf.to_float(tf.stack([0, 0, image_shape[1], image_shape[2]])) mask_predictions_batch = None if (mask_predictions is not None): mask_height = mask_predictions.shape[2].value mask_width = mask_predictions.shape[3].value mask_predictions_batch = tf.reshape(mask_predictions, [(-1), self.max_num_proposals, self.num_classes, mask_height, mask_width]) detections = self._second_stage_nms_fn(refined_decoded_boxes_batch, class_predictions_batch, clip_window=clip_window, change_coordinate_frame=True, num_valid_boxes=num_proposals, masks=mask_predictions_batch) if (mask_predictions is not None): detections['detection_masks'] = tf.to_float(tf.greater_equal(detections['detection_masks'], mask_threshold)) return detections
'Decode tensor of refined box encodings. Args: refined_box_encodings: a 4-D tensor with shape [batch_size, max_num_proposals, num_classes, self._box_coder.code_size] representing predicted (final) refined box encodings. proposal_boxes: [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes. Returns: refined_box_predictions: a [batch_size, max_num_proposals, num_classes, 4] float tensor representing (padded) refined bounding box predictions (for each image in batch, proposal and class).'
def _batch_decode_refined_boxes(self, refined_box_encodings, proposal_boxes):
tiled_proposal_boxes = tf.tile(tf.expand_dims(proposal_boxes, 2), [1, 1, self.num_classes, 1]) tiled_proposals_boxlist = box_list.BoxList(tf.reshape(tiled_proposal_boxes, [(-1), 4])) decoded_boxes = self._box_coder.decode(tf.reshape(refined_box_encodings, [(-1), self._box_coder.code_size]), tiled_proposals_boxlist) return tf.reshape(decoded_boxes.get(), [(-1), self.max_num_proposals, self.num_classes, 4])
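_batch_decode_refined_boxes tiles every proposal box once per class so that the per-class refined encodings can each be decoded against their own copy of the proposal. A numpy illustration of the tiling shape change (the sizes are arbitrary examples):

import numpy as np

batch_size, max_num_proposals, num_classes = 2, 5, 3
proposal_boxes = np.zeros((batch_size, max_num_proposals, 4), dtype=np.float32)
tiled = np.tile(proposal_boxes[:, :, np.newaxis, :], (1, 1, num_classes, 1))
assert tiled.shape == (batch_size, max_num_proposals, num_classes, 4)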
'Compute scalar loss tensors given prediction tensors. If first_stage_only=True, only RPN related losses are computed (i.e., `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all losses are computed. Args: prediction_dict: a dictionary holding prediction tensors (see the documentation for the predict method). If first_stage_only=True, we expect prediction_dict to contain `rpn_box_encodings`, `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`, `image_shape`, and `anchors` fields. Otherwise we expect prediction_dict to additionally contain `refined_box_encodings`, `class_predictions_with_background`, `num_proposals`, and `proposal_boxes` fields. scope: Optional scope name. Returns: a dictionary mapping loss keys (`first_stage_localization_loss`, `first_stage_objectness_loss`, `second_stage_localization_loss`, `second_stage_classification_loss`) to scalar tensors representing corresponding loss values.'
def loss(self, prediction_dict, scope=None):
with tf.name_scope(scope, 'Loss', prediction_dict.values()): (groundtruth_boxlists, groundtruth_classes_with_background_list) = self._format_groundtruth_data(prediction_dict['image_shape']) loss_dict = self._loss_rpn(prediction_dict['rpn_box_encodings'], prediction_dict['rpn_objectness_predictions_with_background'], prediction_dict['anchors'], groundtruth_boxlists, groundtruth_classes_with_background_list) if (not self._first_stage_only): loss_dict.update(self._loss_box_classifier(prediction_dict['refined_box_encodings'], prediction_dict['class_predictions_with_background'], prediction_dict['proposal_boxes'], prediction_dict['num_proposals'], groundtruth_boxlists, groundtruth_classes_with_background_list)) return loss_dict
'Computes scalar RPN loss tensors. Uses self._proposal_target_assigner to obtain regression and classification targets for the first stage RPN, samples a "minibatch" of anchors to participate in the loss computation, and returns the RPN losses. Args: rpn_box_encodings: A 3-D float tensor of shape [batch_size, num_anchors, self._box_coder.code_size] containing predicted proposal box encodings. rpn_objectness_predictions_with_background: A 3-D float tensor of shape [batch_size, num_anchors, 2] containing objectness predictions (logits) for each of the anchors with 0 corresponding to background and 1 corresponding to object. anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors for the first stage RPN. Note that `num_anchors` can differ depending on whether the model is created in training or inference mode. groundtruth_boxlists: A list of BoxLists containing coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: A list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the class targets with the 0th index assumed to map to the background class. Returns: a dictionary mapping loss keys (`first_stage_localization_loss`, `first_stage_objectness_loss`) to scalar tensors representing corresponding loss values.'
def _loss_rpn(self, rpn_box_encodings, rpn_objectness_predictions_with_background, anchors, groundtruth_boxlists, groundtruth_classes_with_background_list):
with tf.name_scope('RPNLoss'): (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets(self._proposal_target_assigner, box_list.BoxList(anchors), groundtruth_boxlists, (len(groundtruth_boxlists) * [None])) batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2) def _minibatch_subsample_fn(inputs): (cls_targets, cls_weights) = inputs return self._first_stage_sampler.subsample(tf.cast(cls_weights, tf.bool), self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool)) batch_sampled_indices = tf.to_float(tf.map_fn(_minibatch_subsample_fn, [batch_cls_targets, batch_cls_weights], dtype=tf.bool, parallel_iterations=self._parallel_iterations, back_prop=True)) normalizer = tf.reduce_sum(batch_sampled_indices, axis=1) batch_one_hot_targets = tf.one_hot(tf.to_int32(batch_cls_targets), depth=2) sampled_reg_indices = tf.multiply(batch_sampled_indices, batch_reg_weights) localization_losses = self._first_stage_localization_loss(rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices) objectness_losses = self._first_stage_objectness_loss(rpn_objectness_predictions_with_background, batch_one_hot_targets, weights=batch_sampled_indices) localization_loss = tf.reduce_mean((tf.reduce_sum(localization_losses, axis=1) / normalizer)) objectness_loss = tf.reduce_mean((tf.reduce_sum(objectness_losses, axis=1) / normalizer)) loss_dict = {'first_stage_localization_loss': (self._first_stage_loc_loss_weight * localization_loss), 'first_stage_objectness_loss': (self._first_stage_obj_loss_weight * objectness_loss)} return loss_dict
'Computes scalar box classifier loss tensors. Uses self._detector_target_assigner to obtain regression and classification targets for the second stage box classifier, optionally performs hard mining, and returns losses. All losses are computed independently for each image and then averaged across the batch. This function assumes that the proposal boxes in the "padded" regions are actually zero (and thus should not be matched to). Args: refined_box_encodings: a 3-D tensor with shape [total_num_proposals, num_classes, box_coder.code_size] representing predicted (final) refined box encodings. class_predictions_with_background: a 3-D tensor with shape [total_num_proposals, num_classes + 1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). proposal_boxes: [batch_size, self.max_num_proposals, 4] representing decoded proposal bounding boxes. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. groundtruth_boxlists: a list of BoxLists containing coordinates of the groundtruth boxes. groundtruth_classes_with_background_list: a list of 2-D one-hot (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the class targets with the 0th index assumed to map to the background class. Returns: a dictionary mapping loss keys (\'second_stage_localization_loss\', \'second_stage_classification_loss\') to scalar tensors representing corresponding loss values.'
def _loss_box_classifier(self, refined_box_encodings, class_predictions_with_background, proposal_boxes, num_proposals, groundtruth_boxlists, groundtruth_classes_with_background_list):
with tf.name_scope('BoxClassifierLoss'): paddings_indicator = self._padded_batched_proposals_indicator(num_proposals, self.max_num_proposals) proposal_boxlists = [box_list.BoxList(proposal_boxes_single_image) for proposal_boxes_single_image in tf.unstack(proposal_boxes)] batch_size = len(proposal_boxlists) num_proposals_or_one = tf.to_float(tf.expand_dims(tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1)) normalizer = (tf.tile(num_proposals_or_one, [1, self.max_num_proposals]) * batch_size) (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets, batch_reg_weights, _) = target_assigner.batch_assign_targets(self._detector_target_assigner, proposal_boxlists, groundtruth_boxlists, groundtruth_classes_with_background_list) flat_cls_targets_with_background = tf.reshape(batch_cls_targets_with_background, [(batch_size * self.max_num_proposals), (-1)]) refined_box_encodings_with_background = tf.pad(refined_box_encodings, [[0, 0], [1, 0], [0, 0]]) refined_box_encodings_masked_by_class_targets = tf.boolean_mask(refined_box_encodings_with_background, tf.greater(flat_cls_targets_with_background, 0)) reshaped_refined_box_encodings = tf.reshape(refined_box_encodings_masked_by_class_targets, [batch_size, (-1), 4]) second_stage_loc_losses = (self._second_stage_localization_loss(reshaped_refined_box_encodings, batch_reg_targets, weights=batch_reg_weights) / normalizer) second_stage_cls_losses = (self._second_stage_classification_loss(class_predictions_with_background, batch_cls_targets_with_background, weights=batch_cls_weights) / normalizer) second_stage_loc_loss = tf.reduce_sum(tf.boolean_mask(second_stage_loc_losses, paddings_indicator)) second_stage_cls_loss = tf.reduce_sum(tf.boolean_mask(second_stage_cls_losses, paddings_indicator)) if self._hard_example_miner: (second_stage_loc_loss, second_stage_cls_loss) = self._unpad_proposals_and_apply_hard_mining(proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals) loss_dict = {'second_stage_localization_loss': (self._second_stage_loc_loss_weight * second_stage_loc_loss), 'second_stage_classification_loss': (self._second_stage_cls_loss_weight * second_stage_cls_loss)} return loss_dict
'Creates indicator matrix of non-pad elements of padded batch proposals. Args: num_proposals: Tensor of type tf.int32 with shape [batch_size]. max_num_proposals: Maximum number of proposals per image (integer). Returns: A Tensor of type tf.bool with shape [batch_size, max_num_proposals].'
def _padded_batched_proposals_indicator(self, num_proposals, max_num_proposals):
batch_size = tf.size(num_proposals) tiled_num_proposals = tf.tile(tf.expand_dims(num_proposals, 1), [1, max_num_proposals]) tiled_proposal_index = tf.tile(tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1]) return tf.greater(tiled_num_proposals, tiled_proposal_index)
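The comparison above is a sequence mask: slot j of image i is real when j < num_proposals[i]. A numpy sketch of the same mask follows (toy values are assumptions); in TensorFlow 1.x the equivalent one-liner would be tf.sequence_mask(num_proposals, max_num_proposals).

import numpy as np

num_proposals = np.array([2, 4])          # real (un-padded) proposals per image
max_num_proposals = 4
indicator = np.arange(max_num_proposals)[np.newaxis, :] < num_proposals[:, np.newaxis]
assert indicator.tolist() == [[True, True, False, False],
                              [True, True, True, True]]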
'Unpads proposals and applies hard mining. Args: proposal_boxlists: A list of `batch_size` BoxLists, each containing `self.max_num_proposals` decoded proposal bounding boxes for one image. second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape `[batch_size, self.max_num_proposals]` representing per-anchor second stage localization loss values. second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape `[batch_size, self.max_num_proposals]` representing per-anchor second stage classification loss values. num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch] representing the number of proposals predicted for each image in the batch. Returns: second_stage_loc_loss: A scalar float32 tensor representing the second stage localization loss. second_stage_cls_loss: A scalar float32 tensor representing the second stage classification loss.'
def _unpad_proposals_and_apply_hard_mining(self, proposal_boxlists, second_stage_loc_losses, second_stage_cls_losses, num_proposals):
for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss, single_image_num_proposals) in zip(proposal_boxlists, tf.unstack(second_stage_loc_losses), tf.unstack(second_stage_cls_losses), tf.unstack(num_proposals)): proposal_boxlist = box_list.BoxList(tf.slice(proposal_boxlist.get(), [0, 0], [single_image_num_proposals, (-1)])) single_image_loc_loss = tf.slice(single_image_loc_loss, [0], [single_image_num_proposals]) single_image_cls_loss = tf.slice(single_image_cls_loss, [0], [single_image_num_proposals]) return self._hard_example_miner(location_losses=tf.expand_dims(single_image_loc_loss, 0), cls_losses=tf.expand_dims(single_image_cls_loss, 0), decoded_boxlist_list=[proposal_boxlist])
'Returns callable for loading a checkpoint into the tensorflow graph. Args: checkpoint_path: path to checkpoint to restore. from_detection_checkpoint: whether to restore from a detection checkpoint (with compatible variable names) or to restore from a classification checkpoint for initialization prior to training. Note that when from_detection_checkpoint=True, the current implementation only supports restoration from an (exactly) identical model (with exception of the num_classes parameter). Returns: a callable which takes a tf.Session as input and loads a checkpoint when run.'
def restore_fn(self, checkpoint_path, from_detection_checkpoint=True):
if (not from_detection_checkpoint): return self._feature_extractor.restore_from_classification_checkpoint_fn(checkpoint_path, self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope) variables_to_restore = tf.global_variables() variables_to_restore.append(slim.get_or_create_global_step()) first_stage_variables = tf.contrib.framework.filter_variables(variables_to_restore, include_patterns=[self.first_stage_feature_extractor_scope, self.second_stage_feature_extractor_scope]) saver = tf.train.Saver(first_stage_variables) def restore(sess): saver.restore(sess, checkpoint_path) return restore
'Preprocesses images for feature extraction (minus image resizing). Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images.'
@abstractmethod def preprocess(self, resized_inputs):
pass
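A minimal sketch of what a concrete override might look like, assuming the common "scale pixel values to [-1, 1]" normalization; real feature extractors may use a different scheme:

def preprocess(self, resized_inputs):
  # Map [0, 255] pixel values to [-1, 1] (assumed normalization).
  return (2.0 / 255.0) * resized_inputs - 1.0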
'Extracts features from preprocessed inputs. This function is responsible for extracting feature maps from preprocessed images. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]'
@abstractmethod def extract_features(self, preprocessed_inputs):
pass
'SSDMetaArch Constructor. TODO: group NMS parameters + score converter into a class and loss parameters into a class and write config protos for postprocessing and losses. Args: is_training: A boolean indicating whether the training version of the computation graph should be constructed. anchor_generator: an anchor_generator.AnchorGenerator object. box_predictor: a box_predictor.BoxPredictor object. box_coder: a box_coder.BoxCoder object. feature_extractor: an SSDFeatureExtractor object. matcher: a matcher.Matcher object. region_similarity_calculator: a region_similarity_calculator.RegionSimilarityCalculator object. image_resizer_fn: a callable for image resizing. This callable always takes a rank-3 image tensor (corresponding to a single image) and returns a rank-3 image tensor, possibly with new spatial dimensions. See builders/image_resizer_builder.py. non_max_suppression_fn: batch_multiclass_non_max_suppression callable that takes `boxes`, `scores` and optional `clip_window` inputs (with all other inputs already set) and returns a dictionary holding tensors with keys: `detection_boxes`, `detection_scores`, `detection_classes` and `num_detections`. See `post_processing.batch_multiclass_non_max_suppression` for the type and shape of these tensors. score_conversion_fn: callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). This is usually used to convert logits to probabilities. classification_loss: an object_detection.core.losses.Loss object. localization_loss: an object_detection.core.losses.Loss object. classification_loss_weight: float localization_loss_weight: float normalize_loss_by_num_matches: boolean hard_example_miner: a losses.HardExampleMiner object (can be None) add_summaries: boolean (default: True) controlling whether summary ops should be added to the tensorflow graph.'
def __init__(self, is_training, anchor_generator, box_predictor, box_coder, feature_extractor, matcher, region_similarity_calculator, image_resizer_fn, non_max_suppression_fn, score_conversion_fn, classification_loss, localization_loss, classification_loss_weight, localization_loss_weight, normalize_loss_by_num_matches, hard_example_miner, add_summaries=True):
super(SSDMetaArch, self).__init__(num_classes=box_predictor.num_classes)
self._is_training = is_training
self._extract_features_scope = 'FeatureExtractor'
self._anchor_generator = anchor_generator
self._box_predictor = box_predictor
self._box_coder = box_coder
self._feature_extractor = feature_extractor
self._matcher = matcher
self._region_similarity_calculator = region_similarity_calculator
# Unmatched anchors are assigned a one-hot background target (class index 0).
unmatched_cls_target = tf.constant([1] + self.num_classes * [0], tf.float32)
self._target_assigner = target_assigner.TargetAssigner(
    self._region_similarity_calculator,
    self._matcher,
    self._box_coder,
    positive_class_weight=1.0,
    negative_class_weight=1.0,
    unmatched_cls_target=unmatched_cls_target)
self._classification_loss = classification_loss
self._localization_loss = localization_loss
self._classification_loss_weight = classification_loss_weight
self._localization_loss_weight = localization_loss_weight
self._normalize_loss_by_num_matches = normalize_loss_by_num_matches
self._hard_example_miner = hard_example_miner
self._image_resizer_fn = image_resizer_fn
self._non_max_suppression_fn = non_max_suppression_fn
self._score_conversion_fn = score_conversion_fn
self._anchors = None
self._add_summaries = add_summaries
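To make the target construction concrete, a small hedged example (num_classes chosen arbitrarily for illustration) of the one-hot background target assigned to unmatched anchors:

import tensorflow as tf

num_classes = 3  # assumed for illustration
unmatched_cls_target = tf.constant([1] + num_classes * [0], tf.float32)
# Evaluates to [1., 0., 0., 0.]: a one-hot row pointing at the background column.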
'Feature-extractor specific preprocessing. See base class. Args: inputs: a [batch, height_in, width_in, channels] float tensor representing a batch of images with values between 0 and 255.0. Returns: preprocessed_inputs: a [batch, height_out, width_out, channels] float tensor representing a batch of images. Raises: ValueError: if inputs tensor does not have type tf.float32'
def preprocess(self, inputs):
if inputs.dtype is not tf.float32:
  raise ValueError('`preprocess` expects a tf.float32 tensor')
with tf.name_scope('Preprocessor'):
  # Resize each image independently, then apply the feature-extractor
  # specific normalization.
  resized_inputs = tf.map_fn(self._image_resizer_fn,
                             elems=inputs,
                             dtype=tf.float32)
  return self._feature_extractor.preprocess(resized_inputs)
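A standalone sketch of the resize-then-normalize flow performed here; the 300x300 target size and the [-1, 1] normalization are assumptions used only for illustration:

import functools
import tensorflow as tf

image_resizer_fn = functools.partial(tf.image.resize_images, size=[300, 300])
images = tf.placeholder(tf.float32, shape=[None, None, None, 3])
resized = tf.map_fn(image_resizer_fn, elems=images, dtype=tf.float32)
normalized = (2.0 / 255.0) * resized - 1.0  # stands in for feature_extractor.preprocess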
'Predicts unpostprocessed tensors from input tensor. This function takes an input batch of images and runs it through the forward pass of the network to yield unpostprocessed predictions. A side effect of calling the predict method is that self._anchors is populated with a box_list.BoxList of anchors. These anchors must be constructed before the postprocess or loss functions can be called. Args: preprocessed_inputs: a [batch, height, width, channels] image tensor. Returns: prediction_dict: a dictionary holding "raw" prediction tensors: 1) box_encodings: 4-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). 3) feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i].'
def predict(self, preprocessed_inputs):
with tf.variable_scope(None, self._extract_features_scope,
                       [preprocessed_inputs]):
  feature_maps = self._feature_extractor.extract_features(preprocessed_inputs)
feature_map_spatial_dims = self._get_feature_map_spatial_dims(feature_maps)
# Side effect: self._anchors must be populated before postprocess() or
# loss() can be called.
self._anchors = self._anchor_generator.generate(feature_map_spatial_dims)
(box_encodings, class_predictions_with_background) = (
    self._add_box_predictions_to_feature_maps(feature_maps))
predictions_dict = {
    'box_encodings': box_encodings,
    'class_predictions_with_background': class_predictions_with_background,
    'feature_maps': feature_maps
}
return predictions_dict
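A commented usage sketch of the returned dictionary; `ssd_model` and `preprocessed_images` are assumed to exist and are not defined by this module:

# Usage sketch only -- names and shapes below are illustrative assumptions.
#
#   prediction_dict = ssd_model.predict(preprocessed_images)
#   prediction_dict['box_encodings']                      # [batch, num_anchors, code_size]
#   prediction_dict['class_predictions_with_background']  # [batch, num_anchors, num_classes + 1]
#   prediction_dict['feature_maps']                       # list of [batch, h_i, w_i, d_i]
#   ssd_model.anchors                                     # BoxList populated as a side effect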
'Adds box predictors to each feature map and returns concatenated results. Args: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] Returns: box_encodings: 4-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions (at class index 0). Raises: RuntimeError: if the number of feature maps extracted via the extract_features method does not match the length of the num_anchors_per_locations list that was passed to the constructor. RuntimeError: if box_encodings from the box_predictor does not have shape of the form [batch_size, num_anchors, 1, code_size].'
def _add_box_predictions_to_feature_maps(self, feature_maps):
num_anchors_per_location_list = (
    self._anchor_generator.num_anchors_per_location())
if len(feature_maps) != len(num_anchors_per_location_list):
  raise RuntimeError('the number of feature maps must match the length of '
                     'self.anchors.NumAnchorsPerLocation().')
box_encodings_list = []
cls_predictions_with_background_list = []
for idx, (feature_map, num_anchors_per_location) in enumerate(
    zip(feature_maps, num_anchors_per_location_list)):
  box_predictor_scope = 'BoxPredictor_{}'.format(idx)
  box_predictions = self._box_predictor.predict(feature_map,
                                                num_anchors_per_location,
                                                box_predictor_scope)
  box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
  cls_predictions_with_background = box_predictions[
      bpredictor.CLASS_PREDICTIONS_WITH_BACKGROUND]
  box_encodings_shape = box_encodings.get_shape().as_list()
  if len(box_encodings_shape) != 4 or box_encodings_shape[2] != 1:
    raise RuntimeError('box_encodings from the box_predictor must be of shape '
                       '`[batch_size, num_anchors, 1, code_size]`; actual '
                       'shape', box_encodings_shape)
  # Squeeze out the singleton axis so encodings are [batch, num_anchors, code_size].
  box_encodings = tf.squeeze(box_encodings, axis=2)
  box_encodings_list.append(box_encodings)
  cls_predictions_with_background_list.append(cls_predictions_with_background)
num_predictions = sum(
    [tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
num_anchors = self.anchors.num_boxes()
anchors_assert = tf.assert_equal(
    num_anchors, num_predictions,
    ['Mismatch: number of anchors vs number of predictions', num_anchors,
     num_predictions])
with tf.control_dependencies([anchors_assert]):
  box_encodings = tf.concat(box_encodings_list, 1)
  class_predictions_with_background = tf.concat(
      cls_predictions_with_background_list, 1)
return (box_encodings, class_predictions_with_background)
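The consistency check above reduces to simple bookkeeping; a hedged numeric example with assumed feature map shapes and anchors-per-location counts:

feature_map_shapes = [(19, 19), (10, 10)]  # assumed spatial dims
anchors_per_location = [3, 6]              # assumed anchors per location
total_anchors = sum(
    h * w * a for (h, w), a in zip(feature_map_shapes, anchors_per_location))
# total_anchors == 19 * 19 * 3 + 10 * 10 * 6 == 1683, which must equal the
# number of concatenated box predictions.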
'Return list of spatial dimensions for each feature map in a list. Args: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i]. Returns: a list of pairs (height, width) for each feature map in feature_maps'
def _get_feature_map_spatial_dims(self, feature_maps):
feature_map_shapes = [
    feature_map.get_shape().as_list() for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
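A small self-contained example (feature map shapes assumed) showing the static-shape extraction this helper performs:

import tensorflow as tf

feature_maps = [tf.zeros([1, 19, 19, 256]), tf.zeros([1, 10, 10, 512])]
spatial_dims = [(fm.get_shape().as_list()[1], fm.get_shape().as_list()[2])
                for fm in feature_maps]
# spatial_dims == [(19, 19), (10, 10)]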
'Converts prediction tensors to final detections. This function converts raw predictions tensors to final detection results by slicing off the background class, decoding box predictions and applying non max suppression and clipping to the image window. See base class for output format conventions. Note also that by default, scores are to be interpreted as logits, but if a score_conversion_fn is used, then scores are remapped (and may thus have a different interpretation). Args: prediction_dict: a dictionary holding prediction tensors with 1) box_encodings: 4-D float tensor of shape [batch_size, num_anchors, box_code_dimension] containing predicted boxes. 2) class_predictions_with_background: 3-D float tensor of shape [batch_size, num_anchors, num_classes+1] containing class predictions (logits) for each of the anchors. Note that this tensor *includes* background class predictions. Returns: detections: a dictionary containing the following fields detection_boxes: [batch, max_detections, 4] detection_scores: [batch, max_detections] detection_classes: [batch, max_detections] num_detections: [batch] Raises: ValueError: if prediction_dict does not contain `box_encodings` or `class_predictions_with_background` fields.'
def postprocess(self, prediction_dict):
if ('box_encodings' not in prediction_dict or
    'class_predictions_with_background' not in prediction_dict):
  raise ValueError('prediction_dict does not contain expected entries.')
with tf.name_scope('Postprocessor'):
  box_encodings = prediction_dict['box_encodings']
  class_predictions = prediction_dict['class_predictions_with_background']
  detection_boxes = bcoder.batch_decode(box_encodings, self._box_coder,
                                        self.anchors)
  detection_boxes = tf.expand_dims(detection_boxes, axis=2)
  # Slice off the background class (index 0) before score conversion and NMS.
  class_predictions_without_background = tf.slice(class_predictions,
                                                  [0, 0, 1], [-1, -1, -1])
  detection_scores = self._score_conversion_fn(
      class_predictions_without_background)
  clip_window = tf.constant([0, 0, 1, 1], tf.float32)
  detections = self._non_max_suppression_fn(detection_boxes,
                                            detection_scores,
                                            clip_window=clip_window)
  return detections
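An end-to-end commented sketch of the inference flow; every name below is an assumption for illustration, not part of this module's API:

# Usage sketch only -- preprocess, predict, then postprocess.
#
#   preprocessed = ssd_model.preprocess(images)
#   prediction_dict = ssd_model.predict(preprocessed)
#   detections = ssd_model.postprocess(prediction_dict)
#   detections['detection_boxes']    # [batch, max_detections, 4], clipped to [0, 1]
#   detections['detection_scores']   # [batch, max_detections]
#   detections['detection_classes']  # [batch, max_detections]
#   detections['num_detections']     # [batch]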