|
|
|
|
|
import contextlib
import gc
|
import math |
|
import re |
|
import time |
|
|
|
import cv2 |
|
import numpy as np |
|
import torch |
|
import torch.nn.functional as F |
|
import torchvision |
|
from torch.nn import CosineSimilarity |
|
|
|
from ultralytics.utils import LOGGER |
|
|
|
|
|
class Profile(contextlib.ContextDecorator): |
|
""" |
|
YOLOv8 Profile class. Use as a decorator with @Profile() or as a context manager with 'with Profile():'. |
|
|
|
Example: |
|
```python |
|
from ultralytics.utils.ops import Profile |
|
|
|
with Profile() as dt: |
|
pass # slow operation here |
|
|
|
print(dt) # prints "Elapsed time is 9.5367431640625e-07 s" |
|
``` |
|
""" |
|
|
|
def __init__(self, t=0.0): |
|
""" |
|
Initialize the Profile class. |
|
|
|
Args: |
|
t (float): Initial time. Defaults to 0.0. |
|
""" |
|
self.t = t |
|
self.cuda = torch.cuda.is_available() |
|
|
|
def __enter__(self): |
|
"""Start timing.""" |
|
self.start = self.time() |
|
return self |
|
|
|
def __exit__(self, type, value, traceback): |
|
"""Stop timing.""" |
|
self.dt = self.time() - self.start |
|
self.t += self.dt |
|
|
|
    def __str__(self):
        """Return a human-readable string reporting the accumulated elapsed time."""
        return f'Elapsed time is {self.t} s'
|
|
|
def time(self): |
|
"""Get current time.""" |
|
if self.cuda: |
|
torch.cuda.synchronize() |
|
return time.time() |
|
|
|
|
|
def segment2box(segment, width=640, height=640): |
|
""" |
|
Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy). |
|
|
|
Args: |
|
        segment (np.ndarray): the segment label, an (n, 2) array of (x, y) points
|
width (int): the width of the image. Defaults to 640 |
|
height (int): The height of the image. Defaults to 640 |
|
|
|
Returns: |
|
(np.ndarray): the minimum and maximum x and y values of the segment. |
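
    Example (an illustrative sketch; the 3-point segment below is hypothetical):
        ```python
        import numpy as np
        from ultralytics.utils.ops import segment2box

        seg = np.array([[10.0, 20.0], [100.0, 200.0], [-5.0, 50.0]])  # one point falls outside the image
        segment2box(seg, width=640, height=640)  # -> [10., 20., 100., 200.]
        ```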
|
""" |
|
|
|
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype) if any(x) else np.zeros(
        4, dtype=segment.dtype)  # xyxy
|
|
|
|
|
def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True): |
|
""" |
|
Rescales bounding boxes (in the format of xyxy) from the shape of the image they were originally specified in |
|
(img1_shape) to the shape of a different image (img0_shape). |
|
|
|
Args: |
|
img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width). |
|
boxes (torch.Tensor): the bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2) |
|
img0_shape (tuple): the shape of the target image, in the format of (height, width). |
|
ratio_pad (tuple): a tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be |
|
calculated based on the size difference between the two images. |
|
        padding (bool): If True, assume the boxes are based on an image augmented (letterboxed) in YOLO style. If
            False, do regular rescaling.
|
|
|
Returns: |
|
boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2) |
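
    Example (an illustrative sketch; the shapes and box below are hypothetical):
        ```python
        import torch
        from ultralytics.utils.ops import scale_boxes

        box = torch.tensor([[100.0, 120.0, 200.0, 220.0]])  # predicted on a 640x640 letterboxed image
        scale_boxes((640, 640), box, (480, 640))  # -> tensor([[100., 40., 200., 140.]]) in the 480x640 original
        ```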
|
""" |
|
if ratio_pad is None: |
|
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) |
|
pad = round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1), round( |
|
(img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1) |
|
else: |
|
gain = ratio_pad[0][0] |
|
pad = ratio_pad[1] |
|
|
|
if padding: |
|
boxes[..., [0, 2]] -= pad[0] |
|
boxes[..., [1, 3]] -= pad[1] |
|
boxes[..., :4] /= gain |
|
clip_boxes(boxes, img0_shape) |
|
return boxes |
|
|
|
|
|
def make_divisible(x, divisor): |
|
""" |
|
    Returns the smallest number greater than or equal to x that is divisible by the given divisor.
|
|
|
Args: |
|
x (int): The number to make divisible. |
|
divisor (int | torch.Tensor): The divisor. |
|
|
|
Returns: |
|
        (int): The smallest integer greater than or equal to x that is divisible by the divisor.
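
    Example:
        ```python
        from ultralytics.utils.ops import make_divisible

        make_divisible(100, 32)  # -> 128
        ```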
|
""" |
|
if isinstance(divisor, torch.Tensor): |
|
divisor = int(divisor.max()) |
|
return math.ceil(x / divisor) * divisor |
|
|
|
def downsample_masks(masks, scale_factor=1.0):
    """Resize masks by `scale_factor` using bilinear interpolation and return them as boolean masks."""
    downsampled_masks = F.interpolate(masks.float().unsqueeze(1), scale_factor=scale_factor, mode='bilinear',
                                      align_corners=False).squeeze(1)
    return downsampled_masks.bool()
|
|
|
def custom_nms(iou_matrix, conf, iou_threshold): |
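    """
    Greedy NMS driven by a precomputed pairwise overlap matrix (e.g. mask IoU or Dice) instead of box IoU.

    Args:
        iou_matrix (torch.Tensor): [n, n] pairwise overlap scores between candidates.
        conf (torch.Tensor): [n] or [n, 1] confidence scores used to rank candidates.
        iou_threshold (float): Candidates overlapping a kept candidate above this value are suppressed.

    Returns:
        (torch.Tensor): Indices of the kept candidates, ordered by descending confidence.

    Example (an illustrative sketch with a hypothetical 3x3 Dice matrix):
        ```python
        import torch

        dice = torch.tensor([[1.0, 0.8, 0.1], [0.8, 1.0, 0.2], [0.1, 0.2, 1.0]])
        conf = torch.tensor([0.9, 0.7, 0.6])
        custom_nms(dice, conf, iou_threshold=0.5)  # -> tensor([0, 2])
        ```
    """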
|
conf_1d = conf.squeeze() |
|
|
|
sorted_indices = conf_1d.argsort(descending=True) |
|
keep = [] |
|
suppressed = torch.zeros(conf.size(0), dtype=torch.bool, device=conf.device) |
|
|
|
for idx in sorted_indices: |
|
if suppressed[idx]: |
|
continue |
|
|
|
keep.append(idx.item()) |
|
|
|
|
|
        # Suppress every candidate whose overlap with the kept one exceeds the threshold
        suppressed |= (iou_matrix[idx] > iou_threshold).squeeze()

        # The kept candidate always overlaps itself above the threshold, so un-suppress it
        suppressed[idx] = False

    keep_indices = torch.tensor(keep, dtype=torch.long, device=iou_matrix.device)
|
return keep_indices |
|
|
|
|
|
|
|
def non_max_suppression( |
|
prediction, |
|
mask_coef=None, |
|
|
|
proto=None, |
|
img_shape = None, |
|
regression_var = None, |
|
conf_thres=0.25, |
|
iou_thres=0.45, |
|
classes=None, |
|
agnostic=False, |
|
multi_label=False, |
|
labels=(), |
|
max_det=300, |
|
nc=0, |
|
max_time_img=60.0, |
|
max_nms=30000, |
|
max_wh=7680, |
|
): |
|
""" |
|
Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box. |
|
|
|
Args: |
|
prediction (torch.Tensor): A tensor of shape (batch_size, num_classes + 4 + num_masks, num_boxes) |
|
containing the predicted boxes, classes, and masks. The tensor should be in the format |
|
            output by a model, such as YOLO.
        mask_coef (torch.Tensor, optional): Per-candidate mask coefficients; when provided together with `proto` and
            `img_shape`, NMS is performed on mask overlap (Dice) instead of box IoU.
        proto (torch.Tensor, optional): Prototype masks from the segmentation head, used to build instance masks.
        img_shape (tuple, optional): Input image shape (height, width); required when `mask_coef` is given.
        regression_var (torch.Tensor, optional): Extra per-box values (e.g. regression variance) that are filtered
            alongside the detections and returned as a second output when provided.
|
conf_thres (float): The confidence threshold below which boxes will be filtered out. |
|
Valid values are between 0.0 and 1.0. |
|
iou_thres (float): The IoU threshold below which boxes will be filtered out during NMS. |
|
Valid values are between 0.0 and 1.0. |
|
classes (List[int]): A list of class indices to consider. If None, all classes will be considered. |
|
agnostic (bool): If True, the model is agnostic to the number of classes, and all |
|
classes will be considered as one. |
|
multi_label (bool): If True, each box may have multiple labels. |
|
labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner |
|
list contains the apriori labels for a given image. The list should be in the format |
|
output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2). |
|
max_det (int): The maximum number of boxes to keep after NMS. |
|
nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks. |
|
max_time_img (float): The maximum time (seconds) for processing one image. |
|
        max_nms (int): The maximum number of boxes passed into torchvision.ops.nms().
|
max_wh (int): The maximum box width and height in pixels |
|
|
|
Returns: |
|
(List[torch.Tensor]): A list of length batch_size, where each element is a tensor of |
|
shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns |
|
            (x1, y1, x2, y2, confidence, class, mask1, mask2, ...). If `regression_var` is provided, a tuple of
            (detections, filtered regression values) is returned instead.
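
    Example (illustrative only; random values stand in for real model output with 80 classes and no masks):
        ```python
        import torch
        from ultralytics.utils.ops import non_max_suppression

        preds = torch.rand(1, 84, 8400)  # (batch, 4 box values + 80 class scores, candidates)
        out = non_max_suppression(preds, conf_thres=0.25, iou_thres=0.45, nc=80)
        print(len(out), out[0].shape)  # one tensor per image, each (num_kept, 6): x1, y1, x2, y2, conf, cls
        ```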
|
""" |
|
|
|
|
|
assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' |
|
assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' |
|
if isinstance(prediction, (list, tuple)): |
|
prediction = prediction[0] |
|
|
|
|
|
|
|
device = prediction.device |
|
mps = 'mps' in device.type |
|
if mps: |
|
prediction = prediction.cpu() |
|
    bs = prediction.shape[0]  # batch size
    nc = nc or (prediction.shape[1] - 4)  # number of classes
    nm = prediction.shape[1] - nc - 4  # number of masks
    mi = 4 + nc  # mask start index
    xc = prediction[:, 4:mi].amax(1) > conf_thres  # candidates
|
|
|
|
|
time_limit = 0.5 + max_time_img * bs |
|
multi_label &= nc > 1 |
|
|
|
    prediction = prediction.transpose(-1, -2)  # (bs, 4 + nc + nm, n) -> (bs, n, 4 + nc + nm)
    prediction[..., :4] = xywh2xyxy(prediction[..., :4])  # xywh to xyxy
|
|
|
t = time.time() |
|
output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs |
|
|
|
|
|
if regression_var is not None: |
|
saved_reg_var = [torch.zeros((0, 6), device=prediction.device)] * bs |
|
regression_var = regression_var.transpose(-1, -2) |
|
|
|
for xi, x in enumerate(prediction): |
|
|
|
|
|
|
|
        x = x[xc[xi]]  # keep only candidates above the confidence threshold
|
|
|
|
|
|
|
|
|
if labels and len(labels[xi]): |
|
lb = labels[xi] |
|
v = torch.zeros((len(lb), nc + nm + 4), device=x.device) |
|
v[:, :4] = xywh2xyxy(lb[:, 1:5]) |
|
v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 |
|
x = torch.cat((x, v), 0) |
|
|
|
if not x.shape[0]: |
|
continue |
|
|
|
|
|
box, cls, mask = x.split((4, nc, nm), 1) |
|
if multi_label: |
|
i, j = torch.where(cls > conf_thres) |
|
x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) |
|
else: |
|
conf, j = cls.max(1, keepdim=True) |
|
x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] |
|
|
|
|
|
if classes is not None: |
|
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] |
|
|
|
|
|
n = x.shape[0] |
|
if not n: |
|
continue |
|
if n > max_nms: |
|
x = x[x[:, 4].argsort(descending=True)[:max_nms]] |
|
|
|
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # class offset so boxes of different classes never overlap
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), confidence scores
|
|
|
        if mask_coef is not None:
            # Mask-overlap NMS branch: build instance masks from the prototypes, then suppress by mask Dice overlap
            ih, iw = img_shape
            mask_coefficient = mask_coef[xi]

            # Keep coefficients for the same candidates that passed the confidence filter (per image, not squeezed)
            filtered_mask_coefficient = mask_coefficient[:, xc[xi]]
            prototype = proto
            c, mh, mw = prototype.shape[-3:]
            masks = (filtered_mask_coefficient.T @ prototype.float().view(c, -1)).sigmoid().view(-1, mh, mw)
|
|
|
            # Scale the boxes down to the prototype resolution before cropping the masks
            downsampled_bboxes = boxes.clone().detach()
|
|
|
downsampled_bboxes[:, 0] *= mw / iw |
|
downsampled_bboxes[:, 2] *= mw / iw |
|
downsampled_bboxes[:, 3] *= mh / ih |
|
downsampled_bboxes[:, 1] *= mh / ih |
|
|
|
masks = crop_mask(masks, downsampled_bboxes) |
|
masks.gt_(0.5) |
|
|
|
|
|
|
|
            # scale_factor=1.0 keeps the prototype resolution; the call mainly converts the cropped masks to bool
            downsampled_masks_bool = downsample_masks(masks, scale_factor=1.0).detach()
            masks_bool = downsampled_masks_bool
|
|
|
            batch_size = 5  # compute pairwise mask overlaps in chunks of 5 rows to bound peak memory
|
num_masks = masks_bool.size(0) |
|
intersections = [] |
|
unions = [] |
|
min_areas = [] |
|
dices = [] |
|
|
|
for batch_start in range(0, num_masks, batch_size): |
|
batch_end = min(batch_start + batch_size, num_masks) |
|
masks_bool_batch = masks_bool[batch_start:batch_end] |
|
|
|
|
|
intersection_batch = (masks_bool_batch.unsqueeze(1) & masks_bool.unsqueeze(0)).float().sum(dim=(-1, -2)) |
|
union_batch = (masks_bool_batch.unsqueeze(1) | masks_bool.unsqueeze(0)).float().sum(dim=(-1, -2)) |
|
|
|
|
|
area_batch = masks_bool_batch.float().sum(dim=(-1, -2)).unsqueeze(1) |
|
area_all = masks_bool.float().sum(dim=(-1, -2)).unsqueeze(0) |
|
min_area_batch = torch.min(area_batch, area_all) |
|
|
|
|
|
dice_batch = (2 * intersection_batch) / (area_batch + area_all) |
|
|
|
intersections.append(intersection_batch) |
|
unions.append(union_batch) |
|
min_areas.append(min_area_batch) |
|
dices.append(dice_batch) |
|
|
|
|
|
intersection = torch.cat(intersections, dim=0) |
|
union = torch.cat(unions, dim=0) |
|
min_area = torch.cat(min_areas, dim=0) |
|
dice = torch.cat(dices, dim=0) |
|
|
|
            # IoU and IoMin are computed for reference; suppression itself uses the Dice overlap matrix
            iou = intersection / union.clamp(min=1)

            iomin = intersection / min_area
            iomin[min_area == 0] = 0
            iou = iou.detach()
            # Greedy suppression on mask Dice overlap, ranked by the per-box confidence scores
            kept_indices = custom_nms(dice, scores, iou_thres)
|
|
|
|
|
|
|
|
|
|
|
            # Free the large intermediates before the next image to limit peak memory
            del masks, downsampled_masks_bool, intersection, union, iou
            gc.collect()
            torch.cuda.empty_cache()
|
i = kept_indices |
|
|
|
|
|
else: |
|
i = torchvision.ops.nms(boxes, scores, iou_thres) |
|
i = i[:max_det] |
|
|
|
if regression_var is not None: |
|
filtered_reg = regression_var[xi][xc[xi]] |
|
saved_reg_var[xi] = filtered_reg[i] |
|
output[xi] = x[i] |
|
if mps: |
|
output[xi] = output[xi].to(device) |
|
if (time.time() - t) > time_limit: |
|
LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') |
|
break |
|
    if regression_var is not None:
        return output, saved_reg_var
    return output
|
|
|
|
|
|
|
def clip_boxes(boxes, shape): |
|
""" |
|
Takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the shape. |
|
|
|
Args: |
|
boxes (torch.Tensor): the bounding boxes to clip |
|
shape (tuple): the shape of the image |
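
    Example (an illustrative sketch; the box and shape are hypothetical):
        ```python
        import torch
        from ultralytics.utils.ops import clip_boxes

        boxes = torch.tensor([[-10.0, 5.0, 700.0, 300.0]])
        clip_boxes(boxes, (480, 640))  # modifies in place
        boxes  # -> tensor([[0., 5., 640., 300.]])
        ```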
|
""" |
|
if isinstance(boxes, torch.Tensor): |
|
boxes[..., 0].clamp_(0, shape[1]) |
|
boxes[..., 1].clamp_(0, shape[0]) |
|
boxes[..., 2].clamp_(0, shape[1]) |
|
boxes[..., 3].clamp_(0, shape[0]) |
|
else: |
|
boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) |
|
boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) |
|
|
|
|
|
def clip_coords(coords, shape): |
|
""" |
|
Clip line coordinates to the image boundaries. |
|
|
|
Args: |
|
coords (torch.Tensor | numpy.ndarray): A list of line coordinates. |
|
shape (tuple): A tuple of integers representing the size of the image in the format (height, width). |
|
|
|
Returns: |
|
        (None): The function modifies the input `coords` in place, clipping each coordinate to the image boundaries.
|
""" |
|
if isinstance(coords, torch.Tensor): |
|
coords[..., 0].clamp_(0, shape[1]) |
|
coords[..., 1].clamp_(0, shape[0]) |
|
else: |
|
coords[..., 0] = coords[..., 0].clip(0, shape[1]) |
|
coords[..., 1] = coords[..., 1].clip(0, shape[0]) |
|
|
|
|
|
def scale_image(masks, im0_shape, ratio_pad=None): |
|
""" |
|
Takes a mask, and resizes it to the original image size |
|
|
|
Args: |
|
masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3]. |
|
im0_shape (tuple): the original image shape |
|
ratio_pad (tuple): the ratio of the padding to the original image. |
|
|
|
Returns: |
|
        masks (np.ndarray): The resized masks, with shape [h, w, num].
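
    Example (an illustrative sketch; the mask and shapes are hypothetical):
        ```python
        import numpy as np
        from ultralytics.utils.ops import scale_image

        letterboxed = np.zeros((640, 640, 1), dtype=np.float32)  # mask predicted on the padded image
        scale_image(letterboxed, (480, 640, 3)).shape  # -> (480, 640, 1)
        ```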
|
""" |
|
|
|
im1_shape = masks.shape |
|
if im1_shape[:2] == im0_shape[:2]: |
|
return masks |
|
if ratio_pad is None: |
|
gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) |
|
pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 |
|
else: |
|
gain = ratio_pad[0][0] |
|
pad = ratio_pad[1] |
|
top, left = int(pad[1]), int(pad[0]) |
|
bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) |
|
|
|
if len(masks.shape) < 2: |
|
raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') |
|
masks = masks[top:bottom, left:right] |
|
masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) |
|
if len(masks.shape) == 2: |
|
masks = masks[:, :, None] |
|
|
|
return masks |
|
|
|
|
|
def xyxy2xywh(x): |
|
""" |
|
Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format where (x1, y1) is the |
|
top-left corner and (x2, y2) is the bottom-right corner. |
|
|
|
Args: |
|
x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. |
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format. |
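
    Example:
        ```python
        import torch
        from ultralytics.utils.ops import xyxy2xywh

        xyxy2xywh(torch.tensor([[10.0, 20.0, 30.0, 60.0]]))  # -> tensor([[20., 40., 20., 40.]])
        ```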
|
""" |
|
assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' |
|
y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) |
|
y[..., 0] = (x[..., 0] + x[..., 2]) / 2 |
|
y[..., 1] = (x[..., 1] + x[..., 3]) / 2 |
|
y[..., 2] = x[..., 2] - x[..., 0] |
|
y[..., 3] = x[..., 3] - x[..., 1] |
|
return y |
|
|
|
|
|
def xywh2xyxy(x): |
|
""" |
|
Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the |
|
top-left corner and (x2, y2) is the bottom-right corner. |
|
|
|
Args: |
|
x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format. |
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format. |
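
    Example:
        ```python
        import torch
        from ultralytics.utils.ops import xywh2xyxy

        xywh2xyxy(torch.tensor([[20.0, 40.0, 20.0, 40.0]]))  # -> tensor([[10., 20., 30., 60.]])
        ```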
|
""" |
|
assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' |
|
y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) |
|
dw = x[..., 2] / 2 |
|
dh = x[..., 3] / 2 |
|
y[..., 0] = x[..., 0] - dw |
|
y[..., 1] = x[..., 1] - dh |
|
y[..., 2] = x[..., 0] + dw |
|
y[..., 3] = x[..., 1] + dh |
|
return y |
|
|
|
|
|
def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): |
|
""" |
|
Convert normalized bounding box coordinates to pixel coordinates. |
|
|
|
Args: |
|
x (np.ndarray | torch.Tensor): The bounding box coordinates. |
|
w (int): Width of the image. Defaults to 640 |
|
h (int): Height of the image. Defaults to 640 |
|
padw (int): Padding width. Defaults to 0 |
|
padh (int): Padding height. Defaults to 0 |
|
Returns: |
|
y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where |
|
x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box. |
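
    Example (values are hypothetical):
        ```python
        import torch
        from ultralytics.utils.ops import xywhn2xyxy

        xywhn2xyxy(torch.tensor([[0.2, 0.3, 0.2, 0.3]]), w=640, h=640)  # -> approximately [64., 96., 192., 288.]
        ```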
|
""" |
|
assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' |
|
y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) |
|
y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw |
|
y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh |
|
y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw |
|
y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh |
|
return y |
|
|
|
|
|
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): |
|
""" |
|
Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. |
|
x, y, width and height are normalized to image dimensions |
|
|
|
Args: |
|
x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. |
|
w (int): The width of the image. Defaults to 640 |
|
h (int): The height of the image. Defaults to 640 |
|
clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False |
|
eps (float): The minimum value of the box's width and height. Defaults to 0.0 |
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format |
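
    Example (values are hypothetical):
        ```python
        import torch
        from ultralytics.utils.ops import xyxy2xywhn

        xyxy2xywhn(torch.tensor([[64.0, 96.0, 192.0, 288.0]]), w=640, h=640)  # -> approximately [0.2, 0.3, 0.2, 0.3]
        ```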
|
""" |
|
if clip: |
|
clip_boxes(x, (h - eps, w - eps)) |
|
assert x.shape[-1] == 4, f'input shape last dimension expected 4 but input shape is {x.shape}' |
|
y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) |
|
y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w |
|
y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h |
|
y[..., 2] = (x[..., 2] - x[..., 0]) / w |
|
y[..., 3] = (x[..., 3] - x[..., 1]) / h |
|
return y |
|
|
|
|
|
def xywh2ltwh(x): |
|
""" |
|
Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates. |
|
|
|
Args: |
|
x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format |
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format |
|
""" |
|
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) |
|
y[..., 0] = x[..., 0] - x[..., 2] / 2 |
|
y[..., 1] = x[..., 1] - x[..., 3] / 2 |
|
return y |
|
|
|
|
|
def xyxy2ltwh(x): |
|
""" |
|
Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right |
|
|
|
Args: |
|
x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format |
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format. |
|
""" |
|
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) |
|
y[..., 2] = x[..., 2] - x[..., 0] |
|
y[..., 3] = x[..., 3] - x[..., 1] |
|
return y |
|
|
|
|
|
def ltwh2xywh(x): |
|
""" |
|
Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center |
|
|
|
Args: |
|
x (torch.Tensor): the input tensor |
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): The bounding box coordinates in the xywh format. |
|
""" |
|
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) |
|
y[..., 0] = x[..., 0] + x[..., 2] / 2 |
|
y[..., 1] = x[..., 1] + x[..., 3] / 2 |
|
return y |
|
|
|
|
|
def xyxyxyxy2xywhr(corners): |
|
""" |
|
Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. |
|
|
|
Args: |
|
corners (numpy.ndarray | torch.Tensor): Input corners of shape (n, 8). |
|
|
|
Returns: |
|
        (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5), with
            rotation in degrees.
|
""" |
|
is_numpy = isinstance(corners, np.ndarray) |
|
atan2, sqrt = (np.arctan2, np.sqrt) if is_numpy else (torch.atan2, torch.sqrt) |
|
|
|
x1, y1, x2, y2, x3, y3, x4, y4 = corners.T |
|
cx = (x1 + x3) / 2 |
|
cy = (y1 + y3) / 2 |
|
dx21 = x2 - x1 |
|
dy21 = y2 - y1 |
|
|
|
w = sqrt(dx21 ** 2 + dy21 ** 2) |
|
h = sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2) |
|
|
|
rotation = atan2(-dy21, dx21) |
|
rotation *= 180.0 / math.pi |
|
|
|
return np.vstack((cx, cy, w, h, rotation)).T if is_numpy else torch.stack((cx, cy, w, h, rotation), dim=1) |
|
|
|
|
|
def xywhr2xyxyxyxy(center): |
|
""" |
|
Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. |
|
|
|
Args: |
|
        center (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5), with
            rotation in degrees.
|
|
|
Returns: |
|
(numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 8). |
|
""" |
|
is_numpy = isinstance(center, np.ndarray) |
|
cos, sin = (np.cos, np.sin) if is_numpy else (torch.cos, torch.sin) |
|
|
|
cx, cy, w, h, rotation = center.T |
|
rotation *= math.pi / 180.0 |
|
|
|
dx = w / 2 |
|
dy = h / 2 |
|
|
|
cos_rot = cos(rotation) |
|
sin_rot = sin(rotation) |
|
dx_cos_rot = dx * cos_rot |
|
dx_sin_rot = dx * sin_rot |
|
dy_cos_rot = dy * cos_rot |
|
dy_sin_rot = dy * sin_rot |
|
|
|
x1 = cx - dx_cos_rot - dy_sin_rot |
|
y1 = cy + dx_sin_rot - dy_cos_rot |
|
x2 = cx + dx_cos_rot - dy_sin_rot |
|
y2 = cy - dx_sin_rot - dy_cos_rot |
|
x3 = cx + dx_cos_rot + dy_sin_rot |
|
y3 = cy - dx_sin_rot + dy_cos_rot |
|
x4 = cx - dx_cos_rot + dy_sin_rot |
|
y4 = cy + dx_sin_rot + dy_cos_rot |
|
|
|
return np.vstack((x1, y1, x2, y2, x3, y3, x4, y4)).T if is_numpy else torch.stack( |
|
(x1, y1, x2, y2, x3, y3, x4, y4), dim=1) |
|
|
|
|
|
def ltwh2xyxy(x): |
|
""" |
|
It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right |
|
|
|
Args: |
|
        x (np.ndarray | torch.Tensor): the input bounding box coordinates in the ltwh format
|
|
|
Returns: |
|
y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes. |
|
""" |
|
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) |
|
y[..., 2] = x[..., 2] + x[..., 0] |
|
y[..., 3] = x[..., 3] + x[..., 1] |
|
return y |
|
|
|
|
|
def segments2boxes(segments): |
|
""" |
|
It converts segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) |
|
|
|
Args: |
|
segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates |
|
|
|
Returns: |
|
(np.ndarray): the xywh coordinates of the bounding boxes. |
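
    Example (a hypothetical 3-point segment):
        ```python
        import numpy as np
        from ultralytics.utils.ops import segments2boxes

        segments2boxes([np.array([[10.0, 20.0], [100.0, 200.0], [50.0, 50.0]])])  # -> [[55., 110., 90., 180.]] (xywh)
        ```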
|
""" |
|
boxes = [] |
|
for s in segments: |
|
x, y = s.T |
|
boxes.append([x.min(), y.min(), x.max(), y.max()]) |
|
return xyxy2xywh(np.array(boxes)) |
|
|
|
|
|
def resample_segments(segments, n=1000): |
|
""" |
|
Inputs a list of segments (n,2) and returns a list of segments (n,2) up-sampled to n points each. |
|
|
|
Args: |
|
segments (list): a list of (n,2) arrays, where n is the number of points in the segment. |
|
n (int): number of points to resample the segment to. Defaults to 1000 |
|
|
|
Returns: |
|
segments (list): the resampled segments. |
|
""" |
|
for i, s in enumerate(segments): |
|
s = np.concatenate((s, s[0:1, :]), axis=0) |
|
x = np.linspace(0, len(s) - 1, n) |
|
xp = np.arange(len(s)) |
|
segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], |
|
dtype=np.float32).reshape(2, -1).T |
|
return segments |
|
|
|
|
|
def crop_mask(masks, boxes): |
|
""" |
|
It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box. |
|
|
|
Args: |
|
masks (torch.Tensor): [n, h, w] tensor of masks |
|
boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form |
|
|
|
Returns: |
|
(torch.Tensor): The masks are being cropped to the bounding box. |
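
    Example (an illustrative sketch with a single all-ones mask):
        ```python
        import torch
        from ultralytics.utils.ops import crop_mask

        masks = torch.ones(1, 4, 4)
        boxes = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
        crop_mask(masks, boxes)[0]  # only the 2x2 region inside the box remains 1; the rest is zeroed
        ```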
|
""" |
|
n, h, w = masks.shape |
|
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) |
|
r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] |
|
c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] |
|
|
|
return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) |
|
|
|
|
|
def process_mask_upsample(protos, masks_in, bboxes, shape): |
|
""" |
|
Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher |
|
quality but is slower. |
|
|
|
Args: |
|
protos (torch.Tensor): [mask_dim, mask_h, mask_w] |
|
masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms |
|
bboxes (torch.Tensor): [n, 4], n is number of masks after nms |
|
shape (tuple): the size of the input image (h,w) |
|
|
|
Returns: |
|
(torch.Tensor): The upsampled masks. |
|
""" |
|
c, mh, mw = protos.shape |
|
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) |
|
masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] |
|
masks = crop_mask(masks, bboxes) |
|
return masks.gt_(0.5) |
|
|
|
|
|
def process_mask(protos, masks_in, bboxes, shape, upsample=False): |
|
""" |
|
Apply masks to bounding boxes using the output of the mask head. |
|
|
|
Args: |
|
protos (torch.Tensor): A tensor of shape [mask_dim, mask_h, mask_w]. |
|
masks_in (torch.Tensor): A tensor of shape [n, mask_dim], where n is the number of masks after NMS. |
|
bboxes (torch.Tensor): A tensor of shape [n, 4], where n is the number of masks after NMS. |
|
shape (tuple): A tuple of integers representing the size of the input image in the format (h, w). |
|
upsample (bool): A flag to indicate whether to upsample the mask to the original image size. Default is False. |
|
|
|
Returns: |
|
(torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w |
|
are the height and width of the input image. The mask is applied to the bounding boxes. |
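
    Example (an illustrative sketch; the prototypes and coefficients below are random stand-ins):
        ```python
        import torch
        from ultralytics.utils.ops import process_mask

        protos = torch.rand(32, 160, 160)  # hypothetical prototype masks
        masks_in = torch.rand(2, 32)  # coefficients for 2 detections kept after NMS
        bboxes = torch.tensor([[0.0, 0.0, 320.0, 320.0], [100.0, 100.0, 400.0, 300.0]])
        process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True).shape  # -> torch.Size([2, 640, 640])
        ```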
|
""" |
|
|
|
c, mh, mw = protos.shape |
|
ih, iw = shape |
|
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) |
|
|
|
downsampled_bboxes = bboxes.clone() |
|
downsampled_bboxes[:, 0] *= mw / iw |
|
downsampled_bboxes[:, 2] *= mw / iw |
|
downsampled_bboxes[:, 3] *= mh / ih |
|
downsampled_bboxes[:, 1] *= mh / ih |
|
|
|
masks = crop_mask(masks, downsampled_bboxes) |
|
if upsample: |
|
masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] |
|
return masks.gt_(0.5) |
|
|
|
|
|
def process_mask_native(protos, masks_in, bboxes, shape): |
|
""" |
|
It takes the output of the mask head, and crops it after upsampling to the bounding boxes. |
|
|
|
Args: |
|
protos (torch.Tensor): [mask_dim, mask_h, mask_w] |
|
masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms |
|
bboxes (torch.Tensor): [n, 4], n is number of masks after nms |
|
shape (tuple): the size of the input image (h,w) |
|
|
|
Returns: |
|
masks (torch.Tensor): The returned masks with dimensions [h, w, n] |
|
""" |
|
c, mh, mw = protos.shape |
|
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) |
|
masks = scale_masks(masks[None], shape)[0] |
|
masks = crop_mask(masks, bboxes) |
|
return masks.gt_(0.5) |
|
|
|
|
|
def scale_masks(masks, shape, padding=True): |
|
""" |
|
Rescale segment masks to shape. |
|
|
|
Args: |
|
masks (torch.Tensor): (N, C, H, W). |
|
shape (tuple): Height and width. |
|
        padding (bool): If True, assume the masks are based on an image augmented (letterboxed) in YOLO style. If
            False, do regular rescaling.
|
""" |
|
mh, mw = masks.shape[2:] |
|
gain = min(mh / shape[0], mw / shape[1]) |
|
pad = [mw - shape[1] * gain, mh - shape[0] * gain] |
|
if padding: |
|
pad[0] /= 2 |
|
pad[1] /= 2 |
|
top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0) |
|
bottom, right = (int(mh - pad[1]), int(mw - pad[0])) |
|
masks = masks[..., top:bottom, left:right] |
|
|
|
masks = F.interpolate(masks, shape, mode='bilinear', align_corners=False) |
|
return masks |
|
|
|
|
|
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True): |
|
""" |
|
Rescale segment coordinates (xy) from img1_shape to img0_shape |
|
|
|
Args: |
|
img1_shape (tuple): The shape of the image that the coords are from. |
|
coords (torch.Tensor): the coords to be scaled of shape n,2. |
|
img0_shape (tuple): the shape of the image that the segmentation is being applied to. |
|
ratio_pad (tuple): the ratio of the image size to the padded image size. |
|
normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False. |
|
        padding (bool): If True, assume the coords are based on an image augmented (letterboxed) in YOLO style. If
            False, do regular rescaling.
|
|
|
Returns: |
|
coords (torch.Tensor): The scaled coordinates. |
|
""" |
|
if ratio_pad is None: |
|
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) |
|
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 |
|
else: |
|
gain = ratio_pad[0][0] |
|
pad = ratio_pad[1] |
|
|
|
if padding: |
|
coords[..., 0] -= pad[0] |
|
coords[..., 1] -= pad[1] |
|
coords[..., 0] /= gain |
|
coords[..., 1] /= gain |
|
clip_coords(coords, img0_shape) |
|
if normalize: |
|
coords[..., 0] /= img0_shape[1] |
|
coords[..., 1] /= img0_shape[0] |
|
return coords |
|
|
|
|
|
def masks2segments(masks, strategy='largest'): |
|
""" |
|
It takes a list of masks(n,h,w) and returns a list of segments(n,xy) |
|
|
|
Args: |
|
masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160) |
|
strategy (str): 'concat' or 'largest'. Defaults to largest |
|
|
|
Returns: |
|
segments (List): list of segment masks |
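
    Example (an illustrative sketch with a single square mask):
        ```python
        import torch
        from ultralytics.utils.ops import masks2segments

        masks = torch.zeros(1, 160, 160)
        masks[0, 40:80, 40:80] = 1.0
        masks2segments(masks)[0].shape  # -> e.g. (4, 2); the filled square reduces to its four corner points
        ```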
|
""" |
|
segments = [] |
|
for x in masks.int().cpu().numpy().astype('uint8'): |
|
c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] |
|
if c: |
|
if strategy == 'concat': |
|
c = np.concatenate([x.reshape(-1, 2) for x in c]) |
|
elif strategy == 'largest': |
|
c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) |
|
else: |
|
c = np.zeros((0, 2)) |
|
segments.append(c.astype('float32')) |
|
return segments |
|
|
|
|
|
def clean_str(s): |
|
""" |
|
Cleans a string by replacing special characters with underscore _ |
|
|
|
Args: |
|
s (str): a string needing special characters replaced |
|
|
|
Returns: |
|
(str): a string with special characters replaced by an underscore _ |
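
    Example:
        ```python
        from ultralytics.utils.ops import clean_str

        clean_str('a@b#c')  # -> 'a_b_c'
        ```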
|
""" |
|
return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) |
|
|