import contextlib
import math
import re
import time

import cv2
import numpy as np
import torch
import torch.nn.functional as F

from ultralytics.utils import LOGGER
from ultralytics.utils.metrics import batch_probiou


class Profile(contextlib.ContextDecorator):
    """
    YOLOv8 Profile class. Use as a decorator with @Profile() or as a context manager with 'with Profile():'.

    Example:
        ```python
        from ultralytics.utils.ops import Profile

        with Profile(device=device) as dt:
            pass  # slow operation here

        print(dt)  # prints "Elapsed time is 9.5367431640625e-07 s"
        ```
    """

    def __init__(self, t=0.0, device: torch.device = None):
        """
        Initialize the Profile class.

        Args:
            t (float): Initial accumulated time. Defaults to 0.0.
            device (torch.device): Device used for model inference. Defaults to None (CPU).
        """
        self.t = t
        self.device = device
        self.cuda = bool(device and str(device).startswith("cuda"))

    def __enter__(self):
        """Start timing."""
        self.start = self.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Stop timing."""
        self.dt = self.time() - self.start  # delta-time
        self.t += self.dt  # accumulate dt

    def __str__(self):
        """Returns a human-readable string representing the accumulated elapsed time in the profiler."""
        return f"Elapsed time is {self.t} s"

    def time(self):
        """Get current time, synchronizing CUDA first so GPU work is included in the measurement."""
        if self.cuda:
            torch.cuda.synchronize(self.device)
        return time.time()


def segment2box(segment, width=640, height=640):
    """
    Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy).

    Args:
        segment (torch.Tensor): The segment label.
        width (int): The width of the image. Defaults to 640.
        height (int): The height of the image. Defaults to 640.

    Returns:
        (np.ndarray): The minimum and maximum x and y values of the segment.
    """
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x = x[inside]
    y = y[inside]
    return (
        np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype)
        if len(x)  # len() rather than any() so points lying exactly on x == 0 still produce a box
        else np.zeros(4, dtype=segment.dtype)
    )


def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False):
    """
    Rescale bounding boxes (in xyxy format by default) from the shape of the image they were originally
    specified in (img1_shape) to the shape of a different image (img0_shape).

    Args:
        img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width).
        boxes (torch.Tensor): The bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2).
        img0_shape (tuple): The shape of the target image, in the format of (height, width).
        ratio_pad (tuple): A tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be
            calculated based on the size difference between the two images.
        padding (bool): If True, assume the boxes come from an image letterboxed in YOLO style. If False, do regular
            rescaling.
        xywh (bool): Whether the box format is xywh. Defaults to False.

    Returns:
        boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2).
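
    Example:
        A minimal sketch with illustrative values: boxes predicted on a 640x640 letterboxed
        input are mapped back to an original 480x640 frame.
        ```python
        import torch

        from ultralytics.utils.ops import scale_boxes

        boxes = torch.tensor([[100.0, 150.0, 300.0, 400.0]])  # xyxy on the 640x640 input
        boxes = scale_boxes((640, 640), boxes, (480, 640))  # rescale to the original image
        ```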
    """
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (
            round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1),
            round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1),
        )  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    if padding:
        boxes[..., 0] -= pad[0]  # x padding
        boxes[..., 1] -= pad[1]  # y padding
        if not xywh:
            boxes[..., 2] -= pad[0]  # x padding
            boxes[..., 3] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    return clip_boxes(boxes, img0_shape)


def make_divisible(x, divisor):
    """
    Returns the smallest number greater than or equal to x that is divisible by the given divisor.

    Args:
        x (int): The number to make divisible.
        divisor (int | torch.Tensor): The divisor.

    Returns:
        (int): The nearest number divisible by the divisor.
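
    Example:
        An illustrative call; channel counts are commonly rounded up to a multiple of a
        model's width divisor.
        ```python
        from ultralytics.utils.ops import make_divisible

        make_divisible(97, 8)  # returns 104
        ```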
    """
    if isinstance(divisor, torch.Tensor):
        divisor = int(divisor.max())  # to int
    return math.ceil(x / divisor) * divisor


def nms_rotated(boxes, scores, threshold=0.45):
    """
    NMS for oriented bounding boxes using probiou and fast-nms.

    Args:
        boxes (torch.Tensor): Rotated bounding boxes, shape (N, 5), format xywhr.
        scores (torch.Tensor): Confidence scores, shape (N,).
        threshold (float, optional): IoU threshold. Defaults to 0.45.

    Returns:
        (torch.Tensor): Indices of boxes to keep after NMS.
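
    Example:
        A minimal sketch with two heavily overlapping rotated boxes; only the higher-scoring
        one survives (values are illustrative).
        ```python
        import torch

        from ultralytics.utils.ops import nms_rotated

        boxes = torch.tensor([[50.0, 50.0, 20.0, 10.0, 0.1], [50.0, 50.0, 20.0, 10.0, 0.12]])
        scores = torch.tensor([0.9, 0.8])
        keep = nms_rotated(boxes, scores, threshold=0.45)  # tensor([0])
        ```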
    """
    if len(boxes) == 0:
        return np.empty((0,), dtype=np.int8)
    sorted_idx = torch.argsort(scores, descending=True)
    boxes = boxes[sorted_idx]
    ious = batch_probiou(boxes, boxes).triu_(diagonal=1)
    pick = torch.nonzero(ious.max(dim=0)[0] < threshold).squeeze_(-1)
    return sorted_idx[pick]


def non_max_suppression(
    prediction,
    conf_thres=0.25,
    iou_thres=0.45,
    classes=None,
    agnostic=False,
    multi_label=False,
    labels=(),
    max_det=300,
    nc=0,  # number of classes (optional)
    max_time_img=0.05,
    max_nms=30000,
    max_wh=7680,
    in_place=True,
    rotated=False,
):
    """
    Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box.

    Args:
        prediction (torch.Tensor): A tensor of shape (batch_size, num_classes + 4 + num_masks, num_boxes)
            containing the predicted boxes, classes, and masks. The tensor should be in the format
            output by a model, such as YOLO.
        conf_thres (float): The confidence threshold below which boxes will be filtered out.
            Valid values are between 0.0 and 1.0.
        iou_thres (float): The IoU threshold above which overlapping boxes will be suppressed during NMS.
            Valid values are between 0.0 and 1.0.
        classes (List[int]): A list of class indices to consider. If None, all classes will be considered.
        agnostic (bool): If True, the model is agnostic to the number of classes, and all
            classes will be considered as one.
        multi_label (bool): If True, each box may have multiple labels.
        labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner
            list contains the apriori labels for a given image. The list should be in the format
            output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2).
        max_det (int): The maximum number of boxes to keep after NMS.
        nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks.
        max_time_img (float): The maximum time (seconds) for processing one image.
        max_nms (int): The maximum number of boxes passed into torchvision.ops.nms().
        max_wh (int): The maximum box width and height in pixels.
        in_place (bool): If True, the input prediction tensor will be modified in place.
        rotated (bool): Whether Oriented Bounding Boxes (OBB) are being passed for NMS.

    Returns:
        (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of
            shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns
            (x1, y1, x2, y2, confidence, class, mask1, mask2, ...).
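
    Example:
        A minimal sketch on a raw YOLO-style detection output; random values stand in for real
        model predictions (illustrative shapes: 80 classes, no masks, 8400 candidate boxes).
        ```python
        import torch

        from ultralytics.utils.ops import non_max_suppression

        prediction = torch.rand(1, 84, 8400)  # (batch, 4 box + 80 class, anchors)
        results = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45)
        print(results[0].shape)  # (num_kept, 6) -> x1, y1, x2, y2, confidence, class
        ```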
    """
    import torchvision  # scope for faster 'import ultralytics'

    # Checks
    assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
    assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
    if isinstance(prediction, (list, tuple)):  # YOLOv8 model in validation mode, output = (inference_out, loss_out)
        prediction = prediction[0]  # select only inference output
    if classes is not None:
        classes = torch.tensor(classes, device=prediction.device)

    if prediction.shape[-1] == 6:  # end-to-end model (BNC, i.e. 1,300,6)
        output = [pred[pred[:, 4] > conf_thres][:max_det] for pred in prediction]
        if classes is not None:
            output = [pred[(pred[:, 5:6] == classes).any(1)] for pred in output]
        return output

    bs = prediction.shape[0]  # batch size
    nc = nc or (prediction.shape[1] - 4)  # number of classes
    nm = prediction.shape[1] - nc - 4  # number of masks
    mi = 4 + nc  # mask start index
    xc = prediction[:, 4:mi].amax(1) > conf_thres  # candidates

    # Settings
    time_limit = 2.0 + max_time_img * bs  # seconds to quit after
    multi_label &= nc > 1  # multiple labels per box

    prediction = prediction.transpose(-1, -2)  # shape(1,84,6300) to shape(1,6300,84)
    if not rotated:
        if in_place:
            prediction[..., :4] = xywh2xyxy(prediction[..., :4])  # xywh to xyxy
        else:
            prediction = torch.cat((xywh2xyxy(prediction[..., :4]), prediction[..., 4:]), dim=-1)  # xywh to xyxy

    t = time.time()
    output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply confidence constraint
        x = x[xc[xi]]

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]) and not rotated:
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + nm + 4), device=x.device)
            v[:, :4] = xywh2xyxy(lb[:, 1:5])  # box
            v[range(len(lb)), lb[:, 0].long() + 4] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain, process next image
        if not x.shape[0]:
            continue

        # Detections matrix nx6 (xyxy, conf, cls)
        box, cls, mask = x.split((4, nc, nm), 1)

        if multi_label:
            i, j = torch.where(cls > conf_thres)
            x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1)
        else:  # best class only
            conf, j = cls.max(1, keepdim=True)
            x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == classes).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        if n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence and remove excess

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # class offset so boxes of different classes never overlap
        scores = x[:, 4]  # scores
        if rotated:
            boxes = torch.cat((x[:, :2] + c, x[:, 2:4], x[:, -1:]), dim=-1)  # xywhr
            i = nms_rotated(boxes, scores, iou_thres)
        else:
            boxes = x[:, :4] + c  # boxes (offset by class)
            i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        i = i[:max_det]  # limit detections

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded")
            break  # time limit exceeded

    return output


def clip_boxes(boxes, shape):
    """
    Takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the shape.

    Args:
        boxes (torch.Tensor): The bounding boxes to clip.
        shape (tuple): The shape of the image as (height, width).

    Returns:
        (torch.Tensor | numpy.ndarray): Clipped boxes.
    """
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[..., 0] = boxes[..., 0].clamp(0, shape[1])  # x1
        boxes[..., 1] = boxes[..., 1].clamp(0, shape[0])  # y1
        boxes[..., 2] = boxes[..., 2].clamp(0, shape[1])  # x2
        boxes[..., 3] = boxes[..., 3].clamp(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def clip_coords(coords, shape):
    """
    Clip line coordinates to the image boundaries.

    Args:
        coords (torch.Tensor | numpy.ndarray): A list of line coordinates.
        shape (tuple): A tuple of integers representing the size of the image in the format (height, width).

    Returns:
        (torch.Tensor | numpy.ndarray): Clipped coordinates.
    """
    if isinstance(coords, torch.Tensor):  # faster individually
        coords[..., 0] = coords[..., 0].clamp(0, shape[1])  # x
        coords[..., 1] = coords[..., 1].clamp(0, shape[0])  # y
    else:  # np.array (faster grouped)
        coords[..., 0] = coords[..., 0].clip(0, shape[1])  # x
        coords[..., 1] = coords[..., 1].clip(0, shape[0])  # y
    return coords


def scale_image(masks, im0_shape, ratio_pad=None):
    """
    Takes a mask and resizes it to the original image size.

    Args:
        masks (np.ndarray): Resized and padded masks/images, [h, w, num]/[h, w, 3].
        im0_shape (tuple): The original image shape.
        ratio_pad (tuple): The ratio of the padding to the original image.

    Returns:
        masks (np.ndarray): The returned masks, with shape [h, w, num].
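
    Example:
        A minimal sketch (shapes are illustrative): a mask produced on a 640x640 letterboxed
        input is cropped and resized back to a 480x640 source image.
        ```python
        import numpy as np

        from ultralytics.utils.ops import scale_image

        letterboxed = np.zeros((640, 640, 1), dtype=np.uint8)
        restored = scale_image(letterboxed, (480, 640, 3))
        print(restored.shape)  # (480, 640, 1)
        ```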
    """
    im1_shape = masks.shape
    if im1_shape[:2] == im0_shape[:2]:
        return masks
    if ratio_pad is None:  # calculate from im0_shape
        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
    else:
        pad = ratio_pad[1]
    top, left = int(pad[1]), int(pad[0])  # y, x
    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])

    if len(masks.shape) < 2:
        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
    masks = masks[top:bottom, left:right]
    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
    if len(masks.shape) == 2:
        masks = masks[:, :, None]

    return masks


def xyxy2xywh(x):
    """
    Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format where (x1, y1) is the
    top-left corner and (x2, y2) is the bottom-right corner.

    Args:
        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format.
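
    Example:
        An illustrative conversion:
        ```python
        import torch

        from ultralytics.utils.ops import xyxy2xywh

        xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])
        xywh = xyxy2xywh(xyxy)  # tensor([[30., 50., 40., 60.]])
        ```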
    """
    assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
    y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
    y[..., 2] = x[..., 2] - x[..., 0]  # width
    y[..., 3] = x[..., 3] - x[..., 1]  # height
    return y


def xywh2xyxy(x):
    """
    Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the
    top-left corner and (x2, y2) is the bottom-right corner. Note: ops per 2 channels faster than per channel.

    Args:
        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format.
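
    Example:
        An illustrative conversion (the inverse of xyxy2xywh):
        ```python
        import torch

        from ultralytics.utils.ops import xywh2xyxy

        xywh = torch.tensor([[30.0, 50.0, 40.0, 60.0]])
        xyxy = xywh2xyxy(xywh)  # tensor([[10., 20., 50., 80.]])
        ```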
    """
    assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
    y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
    xy = x[..., :2]  # centers
    wh = x[..., 2:] / 2  # half width-height
    y[..., :2] = xy - wh  # top left xy
    y[..., 2:] = xy + wh  # bottom right xy
    return y


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    """
    Convert normalized bounding box coordinates to pixel coordinates.

    Args:
        x (np.ndarray | torch.Tensor): The bounding box coordinates.
        w (int): Width of the image. Defaults to 640.
        h (int): Height of the image. Defaults to 640.
        padw (int): Padding width. Defaults to 0.
        padh (int): Padding height. Defaults to 0.

    Returns:
        y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where
            x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box.
    """
    assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
    y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom right y
    return y


def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    """
    Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. x, y,
    width and height are normalized to image dimensions.

    Args:
        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
        w (int): The width of the image. Defaults to 640.
        h (int): The height of the image. Defaults to 640.
        clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False.
        eps (float): The minimum value of the box's width and height. Defaults to 0.0.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format.
    """
    if clip:
        x = clip_boxes(x, (h - eps, w - eps))
    assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
    y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
    y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center
    y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h  # y center
    y[..., 2] = (x[..., 2] - x[..., 0]) / w  # width
    y[..., 3] = (x[..., 3] - x[..., 1]) / h  # height
    return y


def xywh2ltwh(x):
    """
    Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates.

    Args:
        x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in the ltwh format.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
    return y


def xyxy2ltwh(x):
    """
    Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right.

    Args:
        x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in the ltwh format.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 2] = x[..., 2] - x[..., 0]  # width
    y[..., 3] = x[..., 3] - x[..., 1]  # height
    return y


def ltwh2xywh(x):
    """
    Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left and xy=center.

    Args:
        x (torch.Tensor): The input tensor.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in the xywh format.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = x[..., 0] + x[..., 2] / 2  # center x
    y[..., 1] = x[..., 1] + x[..., 3] / 2  # center y
    return y


def xyxyxyxy2xywhr(x):
    """
    Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are
    returned in radians from 0 to pi/2.

    Args:
        x (numpy.ndarray | torch.Tensor): Input box corners [xy1, xy2, xy3, xy4] of shape (n, 8).

    Returns:
        (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5).
    """
    is_torch = isinstance(x, torch.Tensor)
    points = x.cpu().numpy() if is_torch else x
    points = points.reshape(len(x), -1, 2)
    rboxes = []
    for pts in points:
        # NOTE: Use cv2.minAreaRect to get accurate xywhr,
        # especially when objects are cut off by augmentations in the dataloader.
        (cx, cy), (w, h), angle = cv2.minAreaRect(pts)
        rboxes.append([cx, cy, w, h, angle / 180 * np.pi])  # degrees to radians
    return torch.tensor(rboxes, device=x.device, dtype=x.dtype) if is_torch else np.asarray(rboxes)


def xywhr2xyxyxyxy(x):
    """
    Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values should
    be in radians from 0 to pi/2.

    Args:
        x (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).

    Returns:
        (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
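
    Example:
        An illustrative conversion; xyxyxyxy2xywhr converts back, although the recovered box may
        use an equivalent w/h/angle representation.
        ```python
        import torch

        from ultralytics.utils.ops import xywhr2xyxyxyxy

        rbox = torch.tensor([[50.0, 50.0, 20.0, 10.0, 0.5]])  # cx, cy, w, h, r (radians)
        corners = xywhr2xyxyxyxy(rbox)  # shape (1, 4, 2) corner points
        ```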
    """
    cos, sin, cat, stack = (
        (torch.cos, torch.sin, torch.cat, torch.stack)
        if isinstance(x, torch.Tensor)
        else (np.cos, np.sin, np.concatenate, np.stack)
    )

    ctr = x[..., :2]
    w, h, angle = (x[..., i : i + 1] for i in range(2, 5))
    cos_value, sin_value = cos(angle), sin(angle)
    vec1 = [w / 2 * cos_value, w / 2 * sin_value]
    vec2 = [-h / 2 * sin_value, h / 2 * cos_value]
    vec1 = cat(vec1, -1)
    vec2 = cat(vec2, -1)
    pt1 = ctr + vec1 + vec2
    pt2 = ctr + vec1 - vec2
    pt3 = ctr - vec1 - vec2
    pt4 = ctr - vec1 + vec2
    return stack([pt1, pt2, pt3, pt4], -2)


def ltwh2xyxy(x):
    """
    Convert the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.

    Args:
        x (np.ndarray | torch.Tensor): The input boxes.

    Returns:
        y (np.ndarray | torch.Tensor): The xyxy coordinates of the bounding boxes.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 2] = x[..., 2] + x[..., 0]  # width to x2
    y[..., 3] = x[..., 3] + x[..., 1]  # height to y2
    return y


def segments2boxes(segments):
    """
    Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh).

    Args:
        segments (list): List of segments; each segment is a list of points, and each point is a list of x, y
            coordinates.

    Returns:
        (np.ndarray): The xywh coordinates of the bounding boxes.
    """
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # xyxy
    return xyxy2xywh(np.array(boxes))  # xywh
def resample_segments(segments, n=1000):
    """
    Input a list of segments (m,2) and return a list of segments up-sampled to n points each.

    Args:
        segments (list): A list of (m,2) arrays, where m is the number of points in the segment.
        n (int): Number of points to resample each segment to. Defaults to 1000.

    Returns:
        segments (list): The resampled segments.
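
    Example:
        A minimal sketch: a 4-point polygon is densified to a fixed number of points (100 here
        for brevity).
        ```python
        import numpy as np

        from ultralytics.utils.ops import resample_segments

        segments = [np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=np.float32)]
        segments = resample_segments(segments, n=100)
        print(segments[0].shape)  # (100, 2)
        ```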
    """
    for i, s in enumerate(segments):
        s = np.concatenate((s, s[0:1, :]), axis=0)  # close the polygon
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = (
            np.concatenate([np.interp(x, xp, s[:, j]) for j in range(2)], dtype=np.float32).reshape(2, -1).T
        )  # segment xy
    return segments


def crop_mask(masks, boxes):
    """
    Take a mask and a bounding box, and return a mask that is cropped to the bounding box.

    Args:
        masks (torch.Tensor): [n, h, w] tensor of masks.
        boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form.

    Returns:
        (torch.Tensor): The masks cropped to the bounding boxes.
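
    Example:
        A minimal sketch with illustrative shapes; everything outside each box is zeroed.
        ```python
        import torch

        from ultralytics.utils.ops import crop_mask

        masks = torch.ones(2, 160, 160)
        boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0], [20.0, 20.0, 80.0, 80.0]])
        cropped = crop_mask(masks, boxes)  # shape (2, 160, 160), zero outside each box
        ```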
    """
    _, h, w = masks.shape
    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,1,w)
    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(1,h,1)

    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))


def process_mask(protos, masks_in, bboxes, shape, upsample=False):
    """
    Apply masks to bounding boxes using the output of the mask head.

    Args:
        protos (torch.Tensor): A tensor of shape [mask_dim, mask_h, mask_w].
        masks_in (torch.Tensor): A tensor of shape [n, mask_dim], where n is the number of masks after NMS.
        bboxes (torch.Tensor): A tensor of shape [n, 4], where n is the number of masks after NMS.
        shape (tuple): A tuple of integers representing the size of the input image in the format (h, w).
        upsample (bool): A flag to indicate whether to upsample the mask to the original image size. Defaults to False.

    Returns:
        (torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w
            are the height and width of the input image. The mask is applied to the bounding boxes.
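
    Example:
        A minimal sketch with illustrative YOLO segmentation shapes (32 prototypes at 160x160
        resolution, 640x640 input); random values stand in for real model output.
        ```python
        import torch

        from ultralytics.utils.ops import process_mask

        protos = torch.rand(32, 160, 160)
        masks_in = torch.rand(3, 32)  # 3 detections kept after NMS
        bboxes = torch.tensor([[100.0, 100.0, 300.0, 300.0]] * 3)
        masks = process_mask(protos, masks_in, bboxes, (640, 640), upsample=True)
        print(masks.shape)  # (3, 640, 640), binary
        ```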
    """
    c, mh, mw = protos.shape  # CHW
    ih, iw = shape
    masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)  # matmul coefficients with prototypes
    width_ratio = mw / iw
    height_ratio = mh / ih

    downsampled_bboxes = bboxes.clone()
    downsampled_bboxes[:, 0] *= width_ratio
    downsampled_bboxes[:, 2] *= width_ratio
    downsampled_bboxes[:, 3] *= height_ratio
    downsampled_bboxes[:, 1] *= height_ratio

    masks = crop_mask(masks, downsampled_bboxes)  # CHW
    if upsample:
        masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
    return masks.gt_(0.0)


def process_mask_native(protos, masks_in, bboxes, shape):
    """
    Take the output of the mask head, and crop it after upsampling to the bounding boxes.

    Args:
        protos (torch.Tensor): [mask_dim, mask_h, mask_w].
        masks_in (torch.Tensor): [n, mask_dim], where n is the number of masks after NMS.
        bboxes (torch.Tensor): [n, 4], where n is the number of masks after NMS.
        shape (tuple): The size of the input image (h, w).

    Returns:
        masks (torch.Tensor): The returned masks with dimensions [h, w, n].
    """
    c, mh, mw = protos.shape  # CHW
    masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
    masks = scale_masks(masks[None], shape)[0]  # CHW
    masks = crop_mask(masks, bboxes)  # CHW
    return masks.gt_(0.0)


def scale_masks(masks, shape, padding=True):
    """
    Rescale segment masks to shape.

    Args:
        masks (torch.Tensor): (N, C, H, W).
        shape (tuple): Height and width.
        padding (bool): If True, assume the masks come from an image letterboxed in YOLO style. If False, do regular
            rescaling.

    Returns:
        (torch.Tensor): The rescaled masks.
    """
    mh, mw = masks.shape[2:]
    gain = min(mh / shape[0], mw / shape[1])  # gain = old / new
    pad = [mw - shape[1] * gain, mh - shape[0] * gain]  # wh padding
    if padding:
        pad[0] /= 2
        pad[1] /= 2
    top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0)  # y, x
    bottom, right = (int(mh - pad[1]), int(mw - pad[0]))
    masks = masks[..., top:bottom, left:right]

    masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False)  # NCHW
    return masks


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True):
    """
    Rescale segment coordinates (xy) from img1_shape to img0_shape.

    Args:
        img1_shape (tuple): The shape of the image that the coords are from.
        coords (torch.Tensor): The coords to be scaled, of shape (n, 2).
        img0_shape (tuple): The shape of the image that the segmentation is being applied to.
        ratio_pad (tuple): The ratio of the image size to the padded image size.
        normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False.
        padding (bool): If True, assume the coords come from an image letterboxed in YOLO style. If False, do regular
            rescaling.

    Returns:
        coords (torch.Tensor): The scaled coordinates.
    """
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    if padding:
        coords[..., 0] -= pad[0]  # x padding
        coords[..., 1] -= pad[1]  # y padding
    coords[..., 0] /= gain
    coords[..., 1] /= gain
    coords = clip_coords(coords, img0_shape)
    if normalize:
        coords[..., 0] /= img0_shape[1]  # width
        coords[..., 1] /= img0_shape[0]  # height
    return coords


def regularize_rboxes(rboxes):
    """
    Regularize rotated boxes so the long side is w and the angle lies in [0, pi).

    Args:
        rboxes (torch.Tensor): Input boxes of shape (N, 5) in xywhr format.

    Returns:
        (torch.Tensor): The regularized boxes.
    """
    x, y, w, h, t = rboxes.unbind(dim=-1)
    # Swap edge and angle if h >= w
    w_ = torch.where(w > h, w, h)
    h_ = torch.where(w > h, h, w)
    t = torch.where(w > h, t, t + math.pi / 2) % math.pi
    return torch.stack([x, y, w_, h_, t], dim=-1)


def masks2segments(masks, strategy="largest"):
|
|
"""
|
|
It takes a list of masks(n,h,w) and returns a list of segments(n,xy).
|
|
|
|
Args:
|
|
masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160)
|
|
strategy (str): 'concat' or 'largest'. Defaults to largest
|
|
|
|
Returns:
|
|
segments (List): list of segment masks
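
    Example:
        A minimal sketch: a filled square mask yields one contour of polygon points.
        ```python
        import torch

        from ultralytics.utils.ops import masks2segments

        masks = torch.zeros(1, 160, 160)
        masks[0, 40:120, 40:120] = 1  # a filled square
        segments = masks2segments(masks)  # [array of shape (num_points, 2), float32 xy coords]
        ```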
    """
    segments = []
    for x in masks.int().cpu().numpy().astype("uint8"):
        c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]  # external contours only
        if c:
            if strategy == "concat":  # concatenate all segments
                c = np.concatenate([x.reshape(-1, 2) for x in c])
            elif strategy == "largest":  # select largest segment
                c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
        else:
            c = np.zeros((0, 2))  # no segments found
        segments.append(c.astype("float32"))
    return segments


def convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray:
|
|
"""
|
|
Convert a batch of FP32 torch tensors (0.0-1.0) to a NumPy uint8 array (0-255), changing from BCHW to BHWC layout.
|
|
|
|
Args:
|
|
batch (torch.Tensor): Input tensor batch of shape (Batch, Channels, Height, Width) and dtype torch.float32.
|
|
|
|
Returns:
|
|
(np.ndarray): Output NumPy array batch of shape (Batch, Height, Width, Channels) and dtype uint8.
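
    Example:
        An illustrative conversion:
        ```python
        import torch

        from ultralytics.utils.ops import convert_torch2numpy_batch

        batch = torch.rand(4, 3, 640, 640)  # BCHW float32 in [0, 1]
        images = convert_torch2numpy_batch(batch)  # (4, 640, 640, 3) uint8 in [0, 255]
        ```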
    """
    return (batch.permute(0, 2, 3, 1).contiguous() * 255).clamp(0, 255).to(torch.uint8).cpu().numpy()


def clean_str(s):
    """
    Clean a string by replacing special characters with the underscore character '_'.

    Args:
        s (str): A string needing special characters replaced.

    Returns:
        (str): A string with special characters replaced by an underscore.
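
    Example:
        An illustrative call:
        ```python
        from ultralytics.utils.ops import clean_str

        clean_str("a|b@c#d")  # returns 'a_b_c_d'
        ```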
    """
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)