josedolot committed e199348 (1 parent: 0dde52f)

Upload utils/utils.py

Files changed (1): utils/utils.py (+955, -0)
utils/utils.py ADDED
@@ -0,0 +1,955 @@
import math
import os
import warnings
from glob import glob
from typing import Union
from functools import partial
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
import random
import itertools
import yaml
import argparse

import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
from torch import nn
from torch.nn.init import _calculate_fan_in_and_fan_out, _no_grad_normal_
from torchvision.ops.boxes import batched_nms
from pathlib import Path
from .sync_batchnorm import SynchronizedBatchNorm2d


class Params:
    def __init__(self, project_file):
        self.params = yaml.safe_load(open(project_file).read())

    def __getattr__(self, item):
        return self.params.get(item, None)


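# Usage sketch (not part of the original file): Params exposes the keys of a
# project YAML as attributes; any key missing from the file resolves to None
# via __getattr__. The path and key names below are hypothetical.
def _example_params_usage(project_file='projects/example.yml'):
    params = Params(project_file)
    return params.obj_list, params.does_not_exist  # second value is None

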
def save_checkpoint(ckpt, saved_path, name):
    if isinstance(ckpt, dict):
        if isinstance(ckpt['model'], CustomDataParallel):
            ckpt['model'] = ckpt['model'].module.model.state_dict()
            torch.save(ckpt, os.path.join(saved_path, name))
        else:
            ckpt['model'] = ckpt['model'].model.state_dict()
            torch.save(ckpt, os.path.join(saved_path, name))
    else:
        if isinstance(ckpt, CustomDataParallel):
            torch.save(ckpt.module.model.state_dict(), os.path.join(saved_path, name))
        else:
            torch.save(ckpt.model.state_dict(), os.path.join(saved_path, name))


def fitness(x):
    # Model fitness as a weighted combination of metrics
    w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95, iou score, f1_score, loss]
    return (x[:, :] * w).sum(1)


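# Sketch (not in the original file): fitness() expects one row of metrics per
# model/epoch in the order [P, R, mAP@0.5, mAP@0.5:0.95, iou, f1, loss]; the
# values below are dummies.
def _example_fitness():
    metrics = np.array([[0.7, 0.6, 0.55, 0.35, 0.0, 0.0, 0.0]])
    return fitness(metrics)  # array([0.37]) = 0.1 * 0.55 + 0.9 * 0.35

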
def invert_affine(metas: Union[float, list, tuple], preds):
    for i in range(len(preds)):
        if len(preds[i]['rois']) == 0:
            continue
        else:
            if isinstance(metas, float):
                preds[i]['rois'][:, [0, 2]] = preds[i]['rois'][:, [0, 2]] / metas
                preds[i]['rois'][:, [1, 3]] = preds[i]['rois'][:, [1, 3]] / metas
            else:
                new_w, new_h, old_w, old_h, padding_w, padding_h = metas[i]
                preds[i]['rois'][:, [0, 2]] = preds[i]['rois'][:, [0, 2]] / (new_w / old_w)
                preds[i]['rois'][:, [1, 3]] = preds[i]['rois'][:, [1, 3]] / (new_h / old_h)
    return preds


def aspectaware_resize_padding_edited(image, width, height, interpolation=None, means=None):
    old_h, old_w, c = image.shape
    new_h = height
    new_w = width
    padding_h = 0
    padding_w = 0

    # NOTE: the resize target is hard-coded to the 640x384 input used by preprocess() below
    image = cv2.resize(image, (640, 384), interpolation=cv2.INTER_AREA)
    return image, new_w, new_h, old_w, old_h, padding_w, padding_h


def aspectaware_resize_padding(image, width, height, interpolation=None, means=None):
    old_h, old_w, c = image.shape
    if old_w > old_h:
        new_w = width
        new_h = int(width / old_w * old_h)
    else:
        new_w = int(height / old_h * old_w)
        new_h = height

    canvas = np.zeros((height, width, c), np.float32)
    if means is not None:
        canvas[...] = means

    if new_w != old_w or new_h != old_h:
        if interpolation is None:
            image = cv2.resize(image, (new_w, new_h))
        else:
            image = cv2.resize(image, (new_w, new_h), interpolation=interpolation)

    padding_h = height - new_h
    padding_w = width - new_w

    if c > 1:
        canvas[:new_h, :new_w] = image
    else:
        if len(image.shape) == 2:
            canvas[:new_h, :new_w, 0] = image
        else:
            canvas[:new_h, :new_w] = image

    return canvas, new_w, new_h, old_w, old_h, padding_w, padding_h


def preprocess(image_path, max_size=512, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    ori_imgs = [cv2.imread(str(img_path)) for img_path in image_path]
    normalized_imgs = [(img[..., ::-1] / 255 - mean) / std for img in ori_imgs]

    imgs_meta = [aspectaware_resize_padding_edited(img, 640, 384,
                                                   means=None, interpolation=cv2.INTER_AREA) for img in normalized_imgs]

    # imgs_meta = [aspectaware_resize_padding(img, max_size, max_size,
    #                                         means=None) for img in normalized_imgs]

    framed_imgs = [img_meta[0] for img_meta in imgs_meta]
    framed_metas = [img_meta[1:] for img_meta in imgs_meta]

    return ori_imgs, framed_imgs, framed_metas


def preprocess_video(*frame_from_video, max_size=512, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)):
    ori_imgs = frame_from_video
    normalized_imgs = [(img[..., ::-1] / 255 - mean) / std for img in ori_imgs]
    imgs_meta = [aspectaware_resize_padding(img, 640, 384,
                                            means=None) for img in normalized_imgs]
    framed_imgs = [img_meta[0] for img_meta in imgs_meta]
    framed_metas = [img_meta[1:] for img_meta in imgs_meta]

    return ori_imgs, framed_imgs, framed_metas


def postprocess(x, anchors, regression, classification, regressBoxes, clipBoxes, threshold, iou_threshold):
    transformed_anchors = regressBoxes(anchors, regression)
    transformed_anchors = clipBoxes(transformed_anchors, x)
    scores = torch.max(classification, dim=2, keepdim=True)[0]
    scores_over_thresh = (scores > threshold)[:, :, 0]
    out = []
    for i in range(x.shape[0]):
        if scores_over_thresh[i].sum() == 0:
            out.append({
                'rois': np.array(()),
                'class_ids': np.array(()),
                'scores': np.array(()),
            })
            continue

        classification_per = classification[i, scores_over_thresh[i, :], ...].permute(1, 0)
        transformed_anchors_per = transformed_anchors[i, scores_over_thresh[i, :], ...]
        scores_per = scores[i, scores_over_thresh[i, :], ...]
        scores_, classes_ = classification_per.max(dim=0)
        anchors_nms_idx = batched_nms(transformed_anchors_per, scores_per[:, 0], classes_, iou_threshold=iou_threshold)

        if anchors_nms_idx.shape[0] != 0:
            classes_ = classes_[anchors_nms_idx]
            scores_ = scores_[anchors_nms_idx]
            boxes_ = transformed_anchors_per[anchors_nms_idx, :]

            out.append({
                'rois': boxes_.cpu().numpy(),
                'class_ids': classes_.cpu().numpy(),
                'scores': scores_.cpu().numpy(),
            })
        else:
            out.append({
                'rois': np.array(()),
                'class_ids': np.array(()),
                'scores': np.array(()),
            })

    return out


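# End-to-end decode sketch (not in the original file): how postprocess() is
# typically wired to the Anchors, BBoxTransform and ClipBoxes modules defined
# later in this file, run here on dummy tensors. Shapes, thresholds and the
# 1280x720 original-image metadata are illustrative assumptions only.
def _example_postprocess_pipeline():
    imgs = torch.zeros(1, 3, 384, 640)               # dummy 640x384 network input
    anchors = Anchors(anchor_scale=4.)(imgs)         # [1, N, 4]
    num_anchors = anchors.shape[1]
    regression = torch.zeros(1, num_anchors, 4)      # dummy box deltas
    classification = torch.rand(1, num_anchors, 3)   # dummy class scores
    preds = postprocess(imgs, anchors, regression, classification,
                        BBoxTransform(), ClipBoxes(),
                        threshold=0.9, iou_threshold=0.5)
    # map boxes from the 640x384 framed input back to a 1280x720 original frame
    return invert_affine([(640, 384, 1280, 720, 0, 0)], preds)

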
def replace_w_sync_bn(m):
    for var_name in dir(m):
        target_attr = getattr(m, var_name)
        if type(target_attr) == torch.nn.BatchNorm2d:
            num_features = target_attr.num_features
            eps = target_attr.eps
            momentum = target_attr.momentum
            affine = target_attr.affine

            # get parameters
            running_mean = target_attr.running_mean
            running_var = target_attr.running_var
            if affine:
                weight = target_attr.weight
                bias = target_attr.bias

            setattr(m, var_name,
                    SynchronizedBatchNorm2d(num_features, eps, momentum, affine))

            target_attr = getattr(m, var_name)
            # set parameters
            target_attr.running_mean = running_mean
            target_attr.running_var = running_var
            if affine:
                target_attr.weight = weight
                target_attr.bias = bias

    for var_name, children in m.named_children():
        replace_w_sync_bn(children)


class CustomDataParallel(nn.DataParallel):
    """
    force splitting data to all gpus instead of sending all data to cuda:0 and then moving around.
    """

    def __init__(self, module, num_gpus):
        super().__init__(module)
        self.num_gpus = num_gpus

    def scatter(self, inputs, kwargs, device_ids):
        # More like scatter and data prep at the same time. The point is we prep the data in such a way
        # that no scatter is necessary, and there's no need to shuffle stuff around different GPUs.
        devices = ['cuda:' + str(x) for x in range(self.num_gpus)]
        splits = inputs[0].shape[0] // self.num_gpus

        if splits == 0:
            raise Exception('Batchsize must be greater than num_gpus.')

        return [(inputs[0][splits * device_idx: splits * (device_idx + 1)].to(f'cuda:{device_idx}', non_blocking=True),
                 inputs[1][splits * device_idx: splits * (device_idx + 1)].to(f'cuda:{device_idx}', non_blocking=True),
                 inputs[2][splits * device_idx: splits * (device_idx + 1)].to(f'cuda:{device_idx}', non_blocking=True))
                for device_idx in range(len(devices))], \
               [kwargs] * len(devices)


def get_last_weights(weights_path):
    weights_path = glob(weights_path + f'/*.pth')
    weights_path = sorted(weights_path,
                          key=lambda x: int(x.rsplit('_')[-1].rsplit('.')[0]),
                          reverse=True)[0]
    print(f'using weights {weights_path}')
    return weights_path


def init_weights(model):
    for name, module in model.named_modules():
        is_conv_layer = isinstance(module, nn.Conv2d)

        if is_conv_layer:
            if "conv_list" in name or "header" in name:
                variance_scaling_(module.weight.data)
            else:
                nn.init.kaiming_uniform_(module.weight.data)

            if module.bias is not None:
                if "classifier.header" in name:
                    bias_value = -np.log((1 - 0.01) / 0.01)
                    torch.nn.init.constant_(module.bias, bias_value)
                else:
                    module.bias.data.zero_()


def variance_scaling_(tensor, gain=1.):
    # type: (Tensor, float) -> Tensor
    r"""
    initializer for SeparableConv in Regressor/Classifier
    reference: https://keras.io/zh/initializers/ VarianceScaling
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = math.sqrt(gain / float(fan_in))

    return _no_grad_normal_(tensor, 0., std)


def boolean_string(s):
    if s not in {'False', 'True'}:
        raise ValueError('Not a valid boolean string')
    return s == 'True'


def restricted_float(x):
    try:
        x = float(x)
    except ValueError:
        raise argparse.ArgumentTypeError("%r not a floating-point literal" % (x,))

    if x < 0.0 or x > 1.0:
        raise argparse.ArgumentTypeError("%r not in range [0.0, 1.0]" % (x,))
    return x


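# Sketch (not in the original file): these two helpers are meant to be used as
# argparse `type=` callbacks; the flag names below are illustrative only.
def _example_argparse_usage(argv=('--use_cuda', 'True', '--conf_thres', '0.3')):
    parser = argparse.ArgumentParser()
    parser.add_argument('--use_cuda', type=boolean_string, default=False)
    parser.add_argument('--conf_thres', type=restricted_float, default=0.25)
    return parser.parse_args(list(argv))  # Namespace(use_cuda=True, conf_thres=0.3)

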
# --------------------------EVAL UTILS---------------------------
def process_batch(detections, labels, iou_thresholds):
    """
    Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
    Arguments:
        detections (Array[N, 6]), x1, y1, x2, y2, conf, class
        labels (Array[M, 5]), x1, y1, x2, y2, class
        iou_thresholds: list iou thresholds from 0.5 -> 0.95
    Returns:
        correct (Array[N, 10]), for 10 IoU levels
    """
    labels = labels.to(detections.device)
    correct = torch.zeros(detections.shape[0], iou_thresholds.shape[0], dtype=torch.bool, device=iou_thresholds.device)
    iou = box_iou(labels[:, :4], detections[:, :4])
    x = torch.where((iou >= iou_thresholds[0]) & (labels[:, 4:5] == detections[:, 5]))
    if x[0].shape[0]:
        # [label, detection, iou]
        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
        if x[0].shape[0] > 1:
            matches = matches[matches[:, 2].argsort()[::-1]]
            matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
            matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        matches = torch.Tensor(matches).to(iou_thresholds.device)
        correct[matches[:, 1].long()] = matches[:, 2:3] >= iou_thresholds

    return correct


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    box1 = box1.cuda()
    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


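# Sketch (not in the original file): converting a center-format box and taking
# its IoU with itself. Note that box_iou() above moves box1 to CUDA, so the
# second half of this example assumes a GPU is available.
def _example_box_utils():
    xywh = np.array([[50., 50., 20., 10.]])           # cx, cy, w, h
    xyxy = xywh2xyxy(xywh)                            # -> [[40., 45., 60., 55.]]
    boxes = torch.tensor(xyxy, dtype=torch.float32)
    return box_iou(boxes, boxes.cuda())               # -> tensor([[1.]], device='cuda:0')

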
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    if len(coords) == 0:
        return []
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2


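# Sketch (not in the original file): mapping a box predicted on a 384x640
# letterboxed input back to a 720x1280 original frame (gain 0.5, 12 px of
# vertical padding). The coordinates are illustrative.
def _example_scale_coords():
    coords = torch.tensor([[320., 192., 480., 288.]])     # xyxy on the 384x640 input
    return scale_coords((384, 640), coords, (720, 1280))  # -> [[640., 360., 960., 552.]]

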
def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp: True positives (nparray, nx1 or nx10).
        conf: Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
        plot: Plot precision-recall curve at mAP@0.5
        save_dir: Plot save directory
    # Returns
        The average precision as computed in py-faster-rcnn.
    """

    # Sort by objectness
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]

    # Find unique classes
    unique_classes = np.unique(target_cls)

    # Create Precision-Recall curve and compute AP for each class
    px, py = np.linspace(0, 1, 1000), []  # for plotting
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [unique_classes.shape[0], tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros((unique_classes.shape[0], 1000)), np.zeros((unique_classes.shape[0], 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = (target_cls == c).sum()  # number of labels
        n_p = i.sum()  # number of predictions

        if n_p == 0 or n_l == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)

            # Recall
            recall = tpc / (n_l + 1e-16)  # recall curve
            r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score
            # AP from recall-precision curve
            for j in range(tp.shape[1]):
                ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
                if plot and (j == 0):
                    py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5

    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
    i = r.mean(0).argmax()

    if plot:
        plot_pr_curve(px, py, ap, save_dir, names)

    return p[:, i], r[:, i], f1[:, i], ap, unique_classes.astype('int32')


def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves
    # Arguments
        recall:    The recall curve (list)
        precision: The precision curve (list)
    # Returns
        Average precision, precision curve, recall curve
    """

    # Append sentinel values to beginning and end
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([1.0], precision, [0.0]))

    # Compute the precision envelope
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
    else:  # 'continuous'
        i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve

    return ap, mpre, mrec


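# Worked sketch (not in the original file): compute_ap() builds the precision
# envelope and integrates it over recall with 101-point interpolation (COCO
# style). For a detector that keeps precision 1.0 at every recall level the
# integral comes out close to 1.0. Values below are dummies.
def _example_compute_ap():
    recall = np.array([0.25, 0.5, 0.75, 1.0])
    precision = np.array([1.0, 1.0, 1.0, 1.0])
    ap, mpre, mrec = compute_ap(recall, precision)
    return ap  # close to 1.0

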
def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()):
    # Precision-recall curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)
    py = np.stack(py, axis=1)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py.T):
            ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}')  # plot(recall, precision)
    else:
        ax.plot(px, py, linewidth=1, color='grey')  # plot(recall, precision)

    ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
    plt.close()


def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'):
    # Metric-confidence curve
    fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True)

    if 0 < len(names) < 21:  # display per-class legend if < 21 classes
        for i, y in enumerate(py):
            ax.plot(px, y, linewidth=1, label=f'{names[i]}')  # plot(confidence, metric)
    else:
        ax.plot(px, py.T, linewidth=1, color='grey')  # plot(confidence, metric)

    y = py.mean(0)
    ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}')
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    fig.savefig(Path(save_dir), dpi=250)
    plt.close()


def cal_weighted_ap(ap50):
    return 0.2 * ap50[1] + 0.3 * ap50[0] + 0.5 * ap50[2]


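# Sketch (not in the original file): cal_weighted_ap() combines the per-class
# AP@0.5 values of a three-class detector with fixed weights
# (0.3 * ap50[0] + 0.2 * ap50[1] + 0.5 * ap50[2]). Values below are dummies.
def _example_cal_weighted_ap():
    ap50 = np.array([0.8, 0.6, 0.4])
    return cal_weighted_ap(ap50)  # 0.3*0.8 + 0.2*0.6 + 0.5*0.4 = 0.56

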
class ConfusionMatrix:
    # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix
    def __init__(self, nc, conf=0.25, iou_thres=0.45):
        self.matrix = np.zeros((nc + 1, nc + 1))
        self.nc = nc  # number of classes
        self.conf = conf
        self.iou_thres = iou_thres

    def process_batch(self, detections, labels):
        """
        Update the confusion matrix with one batch of detections and labels.
        Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
        Arguments:
            detections (Array[N, 6]), x1, y1, x2, y2, conf, class
            labels (Array[M, 5]), x1, y1, x2, y2, class
        Returns:
            None, updates confusion matrix accordingly
        """
        detections = detections[detections[:, 4] > self.conf]
        gt_classes = labels[:, 4].int()
        detection_classes = detections[:, 5].int()
        iou = box_iou(labels[:, :4], detections[:, :4])

        x = torch.where(iou > self.iou_thres)
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
        else:
            matches = np.zeros((0, 3))

        n = matches.shape[0] > 0
        m0, m1, _ = matches.transpose().astype(np.int16)
        for i, gc in enumerate(gt_classes):
            j = m0 == i
            if n and sum(j) == 1:
                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
            else:
                self.matrix[self.nc, gc] += 1  # background FP

        if n:
            for i, dc in enumerate(detection_classes):
                if not any(m1 == i):
                    self.matrix[dc, self.nc] += 1  # background FN

    def matrix(self):
        return self.matrix

    def tp_fp(self):
        tp = self.matrix.diagonal()  # true positives
        fp = self.matrix.sum(1) - tp  # false positives
        fn = self.matrix.sum(0) - tp  # false negatives (missed detections)

        return tp[:-1], fp[:-1], fn[:-1]  # remove background class

    def plot(self, normalize=True, save_dir='', names=()):
        try:
            import seaborn as sn

            array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1)  # normalize columns
            array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)

            fig = plt.figure(figsize=(12, 9), tight_layout=True)
            sn.set(font_scale=1.0 if self.nc < 50 else 0.8)  # for label size
            labels = (0 < len(names) < 99) and len(names) == self.nc  # apply names to ticklabels
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
                sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True,
                           xticklabels=names + ['background FP'] if labels else "auto",
                           yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1))
            fig.axes[0].set_xlabel('True')
            fig.axes[0].set_ylabel('Predicted')
            fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
            plt.close()
        except Exception as e:
            print(f'WARNING: ConfusionMatrix plot failure: {e}')

    def print(self):
        for i in range(self.nc + 1):
            print(' '.join(map(str, self.matrix[i])))


class BBoxTransform(nn.Module):

    def forward(self, anchors, regression):
        y_centers_a = (anchors[..., 0] + anchors[..., 2]) / 2
        x_centers_a = (anchors[..., 1] + anchors[..., 3]) / 2
        ha = anchors[..., 2] - anchors[..., 0]
        wa = anchors[..., 3] - anchors[..., 1]

        w = regression[..., 3].exp() * wa
        h = regression[..., 2].exp() * ha

        y_centers = regression[..., 0] * ha + y_centers_a
        x_centers = regression[..., 1] * wa + x_centers_a

        ymin = y_centers - h / 2.
        xmin = x_centers - w / 2.
        ymax = y_centers + h / 2.
        xmax = x_centers + w / 2.

        return torch.stack([xmin, ymin, xmax, ymax], dim=2)


637
+ class ClipBoxes(nn.Module):
638
+
639
+ def __init__(self):
640
+ super(ClipBoxes, self).__init__()
641
+
642
+ def forward(self, boxes, img):
643
+ batch_size, num_channels, height, width = img.shape
644
+
645
+ boxes[:, :, 0] = torch.clamp(boxes[:, :, 0], min=0)
646
+ boxes[:, :, 1] = torch.clamp(boxes[:, :, 1], min=0)
647
+
648
+ boxes[:, :, 2] = torch.clamp(boxes[:, :, 2], max=width - 1)
649
+ boxes[:, :, 3] = torch.clamp(boxes[:, :, 3], max=height - 1)
650
+
651
+ return boxes
652
+
653
+
654
+ class Anchors(nn.Module):
655
+
656
+ def __init__(self, anchor_scale=4., pyramid_levels=None, **kwargs):
657
+ super().__init__()
658
+ self.anchor_scale = anchor_scale
659
+
660
+ if pyramid_levels is None:
661
+ self.pyramid_levels = [3, 4, 5, 6, 7]
662
+ else:
663
+ self.pyramid_levels = pyramid_levels
664
+
665
+ self.strides = kwargs.get('strides', [2 ** x for x in self.pyramid_levels])
666
+ self.scales = np.array(kwargs.get('scales', [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]))
667
+ self.ratios = kwargs.get('ratios', [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)])
668
+
669
+ self.last_anchors = {}
670
+ self.last_shape = None
671
+
672
+ def forward(self, image, dtype=torch.float32):
673
+ """Generates multiscale anchor boxes.
674
+
675
+ Args:
676
+ image_size: integer number of input image size. The input image has the
677
+ same dimension for width and height. The image_size should be divided by
678
+ the largest feature stride 2^max_level.
679
+ anchor_scale: float number representing the scale of size of the base
680
+ anchor to the feature stride 2^level.
681
+ anchor_configs: a dictionary with keys as the levels of anchors and
682
+ values as a list of anchor configuration.
683
+
684
+ Returns:
685
+ anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
686
+ feature levels.
687
+ Raises:
688
+ ValueError: input size must be the multiple of largest feature stride.
689
+ """
690
+ image_shape = image.shape[2:]
691
+
692
+ if image_shape == self.last_shape and image.device in self.last_anchors:
693
+ return self.last_anchors[image.device]
694
+
695
+ if self.last_shape is None or self.last_shape != image_shape:
696
+ self.last_shape = image_shape
697
+
698
+ if dtype == torch.float16:
699
+ dtype = np.float16
700
+ else:
701
+ dtype = np.float32
702
+
703
+ boxes_all = []
704
+ for stride in self.strides:
705
+ boxes_level = []
706
+ for scale, ratio in itertools.product(self.scales, self.ratios):
707
+ if image_shape[1] % stride != 0:
708
+ raise ValueError('input size must be divided by the stride.')
709
+ base_anchor_size = self.anchor_scale * stride * scale
710
+ anchor_size_x_2 = base_anchor_size * ratio[0] / 2.0
711
+ anchor_size_y_2 = base_anchor_size * ratio[1] / 2.0
712
+
713
+ x = np.arange(stride / 2, image_shape[1], stride)
714
+ y = np.arange(stride / 2, image_shape[0], stride)
715
+ xv, yv = np.meshgrid(x, y)
716
+ xv = xv.reshape(-1)
717
+ yv = yv.reshape(-1)
718
+
719
+ # y1,x1,y2,x2
720
+ boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
721
+ yv + anchor_size_y_2, xv + anchor_size_x_2))
722
+ boxes = np.swapaxes(boxes, 0, 1)
723
+ boxes_level.append(np.expand_dims(boxes, axis=1))
724
+ # concat anchors on the same level to the reshape NxAx4
725
+ boxes_level = np.concatenate(boxes_level, axis=1)
726
+ boxes_all.append(boxes_level.reshape([-1, 4]))
727
+
728
+ anchor_boxes = np.vstack(boxes_all)
729
+
730
+ anchor_boxes = torch.from_numpy(anchor_boxes.astype(dtype)).to(image.device)
731
+ anchor_boxes = anchor_boxes.unsqueeze(0)
732
+
733
+ # save it for later use to reduce overhead
734
+ self.last_anchors[image.device] = anchor_boxes
735
+ return anchor_boxes
736
+
737
+
class DataLoaderX(DataLoader):
    """prefetch dataloader"""
    def __iter__(self):
        return BackgroundGenerator(super().__iter__())


def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """change color hue, saturation, value"""
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])


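# Sketch (not in the original file): augment_hsv() jitters a uint8 BGR image in
# place, so it is typically called on a copy of the frame. The gains below are
# illustrative.
def _example_augment_hsv():
    img = np.random.randint(0, 256, (384, 640, 3), dtype=np.uint8)  # dummy BGR frame
    augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)
    return img  # same array, modified in place

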
def random_perspective(combination, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    """combination of img transform"""
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    img, gray, line = combination
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
            gray = cv2.warpPerspective(gray, M, dsize=(width, height), borderValue=0)
            line = cv2.warpPerspective(line, M, dsize=(width, height), borderValue=0)
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
            gray = cv2.warpAffine(gray, M[:2], dsize=(width, height), borderValue=0)
            line = cv2.warpAffine(line, M[:2], dsize=(width, height), borderValue=0)

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = xy @ M.T  # transform
        if perspective:
            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
        else:  # affine
            xy = xy[:, :2].reshape(n, 8)

        # create new boxes
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # clip boxes
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)

        # filter candidates
        i = _box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    combination = (img, gray, line)
    return combination, targets


def cutout(combination, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    image, gray = combination
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
        box2 = box2.transpose()

        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        # Intersection area
        inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                     (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

        # box2 area
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16

        # Intersection over box2 area
        return inter_area / box2_area

    # create random masks
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)
        # print('xmin:{},ymin:{},xmax:{},ymax:{}'.format(xmin,ymin,xmax,ymax))

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
        gray[ymin:ymax, xmin:xmax] = -1

        # return unobscured labels
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return image, gray, labels


def letterbox(combination, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """Resize the image and pad the top and bottom with gray borders; see https://zhuanlan.zhihu.com/p/172121380"""
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    img, gray, line = combination
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
        gray = cv2.resize(gray, new_unpad, interpolation=cv2.INTER_LINEAR)
        line = cv2.resize(line, new_unpad, interpolation=cv2.INTER_LINEAR)

    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))

    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    gray = cv2.copyMakeBorder(gray, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)  # add border
    line = cv2.copyMakeBorder(line, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)  # add border

    combination = (img, gray, line)
    return combination, ratio, (dw, dh)


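# Sketch (not in the original file): letterboxing a 720p frame together with its
# two segmentation masks down to a 384x640 canvas. The frame, mask contents and
# shapes are illustrative dummies.
def _example_letterbox():
    img = np.zeros((720, 1280, 3), dtype=np.uint8)   # dummy BGR frame
    gray = np.zeros((720, 1280), dtype=np.uint8)     # dummy segmentation mask
    line = np.zeros((720, 1280), dtype=np.uint8)     # dummy lane-line mask
    (img, gray, line), ratio, (dw, dh) = letterbox((img, gray, line),
                                                   new_shape=(384, 640), auto=False)
    return img.shape, ratio, (dw, dh)  # ((384, 640, 3), (0.5, 0.5), (0.0, 12.0))

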
def _box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr)  # candidates