import argparse
import glob
import os
from enum import Enum
from typing import List, Optional, Union

import norfair
import numpy as np
import torch
import torchvision.ops.boxes as bops
from norfair import Detection, draw_absolute_grid

DISTANCE_THRESHOLD_BBOX: float = 3.33
DISTANCE_THRESHOLD_CENTROID: int = 30
MAX_DISTANCE: int = 10000

models_path = {"YoloV7": "models/yolov7.pt", "YoloV7 Tiny": "models/yolov7-tiny.pt"}
style = {"Bounding box": "bbox", "Centroid": "centroid"}

class YOLO:
    def __init__(self, model_path: str, device: Optional[str] = None):
        if device is not None and "cuda" in device and not torch.cuda.is_available():
            raise Exception("Selected device='cuda', but cuda is not available to Pytorch.")
        # Automatically set the device if it's None
        elif device is None:
            device = "cuda:0" if torch.cuda.is_available() else "cpu"
        # Download the weights if they are not present locally
        if not os.path.exists(model_path):
            os.system(
                f"wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/{os.path.basename(model_path)} -O {model_path}"
            )
        # Load the model from the YOLOv7 hub repo; chain the original error so
        # the real cause (network, bad weights, etc.) is not swallowed
        try:
            self.model = torch.hub.load("WongKinYiu/yolov7", "custom", model_path)
        except Exception as e:
            raise Exception(f"Failed to load model from {model_path}") from e
        # Move the model to the selected device so the `device` argument takes effect
        self.model.to(device)
    def __call__(
        self,
        img: Union[str, np.ndarray],
        conf_threshold: float = 0.25,
        iou_threshold: float = 0.45,
        image_size: int = 720,
        classes: Optional[List[int]] = None,
    ) -> torch.Tensor:
        self.model.conf = conf_threshold
        self.model.iou = iou_threshold
        if classes is not None:
            self.model.classes = classes
        detections = self.model(img, size=image_size)
        return detections
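
# A minimal usage sketch of the wrapper above (not part of the app itself):
# `frame` is assumed to be a numpy image from whatever video source is in use,
# and the weights path comes from the `models_path` dict defined at the top.
#
#     detector = YOLO(models_path["YoloV7 Tiny"])
#     yolo_detections = detector(frame, conf_threshold=0.25, image_size=640)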

def euclidean_distance(detection, tracked_object):
    """Euclidean distance between a detection and a tracked object's estimate."""
    return np.linalg.norm(detection.points - tracked_object.estimate)


def center(points):
    """Centroid of a set of points, wrapped in a list as norfair expects."""
    return [np.mean(np.array(points), axis=0)]

def iou_pytorch(detection, tracked_object):
    # Slower but simpler version of iou, using torchvision's box_iou
    detection_points = np.concatenate([detection.points[0], detection.points[1]])
    tracked_object_points = np.concatenate([tracked_object.estimate[0], tracked_object.estimate[1]])
    box_a = torch.tensor([detection_points], dtype=torch.float)
    box_b = torch.tensor([tracked_object_points], dtype=torch.float)
    iou = bops.box_iou(box_a, box_b)
    # Since 0 <= IoU <= 1, we define 1/IoU as a distance.
    # Distance values will be in [1, inf)
    # (np.float was removed in NumPy 1.24; the builtin float handles the 1x1 tensor)
    return float(1 / iou) if iou else float(MAX_DISTANCE)

def iou(detection, tracked_object):
    # Detection points will be box A,
    # the tracked object's estimate will be box B.
    box_a = np.concatenate([detection.points[0], detection.points[1]])
    box_b = np.concatenate([tracked_object.estimate[0], tracked_object.estimate[1]])
    x_a = max(box_a[0], box_b[0])
    y_a = max(box_a[1], box_b[1])
    x_b = min(box_a[2], box_b[2])
    y_b = min(box_a[3], box_b[3])
    # Compute the area of the intersection rectangle
    inter_area = max(0, x_b - x_a + 1) * max(0, y_b - y_a + 1)
    # Compute the area of both the prediction and tracker rectangles
    box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
    box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
    # Compute the intersection over union by taking the intersection
    # area and dividing it by the sum of the prediction and tracker
    # areas minus the intersection area
    iou = inter_area / float(box_a_area + box_b_area - inter_area)
    # Since 0 <= IoU <= 1, we define 1/IoU as a distance.
    # Distance values will be in [1, inf)
    return 1 / iou if iou else MAX_DISTANCE
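
# These distance functions are meant to be plugged into a norfair Tracker.
# A sketch, assuming the thresholds defined at the top of this module:
#
#     tracker = norfair.Tracker(
#         distance_function=iou,  # or euclidean_distance for centroid tracking
#         distance_threshold=DISTANCE_THRESHOLD_BBOX,  # DISTANCE_THRESHOLD_CENTROID for centroids
#     )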

def yolo_detections_to_norfair_detections(
    yolo_detections: torch.Tensor, track_points: str = "centroid"  # bbox or centroid
) -> List[Detection]:
    """Convert YOLO detections to norfair detections, as centroids (xywh) or bounding boxes (xyxy)."""
    norfair_detections: List[Detection] = []
    if track_points == "centroid":
        detections_as_xywh = yolo_detections.xywh[0]
        for detection_as_xywh in detections_as_xywh:
            centroid = np.array([detection_as_xywh[0].item(), detection_as_xywh[1].item()])
            scores = np.array([detection_as_xywh[4].item()])
            norfair_detections.append(Detection(points=centroid, scores=scores))
    elif track_points == "bbox":
        detections_as_xyxy = yolo_detections.xyxy[0]
        for detection_as_xyxy in detections_as_xyxy:
            bbox = np.array(
                [
                    [detection_as_xyxy[0].item(), detection_as_xyxy[1].item()],
                    [detection_as_xyxy[2].item(), detection_as_xyxy[3].item()],
                ]
            )
            scores = np.array([detection_as_xyxy[4].item(), detection_as_xyxy[4].item()])
            norfair_detections.append(Detection(points=bbox, scores=scores))
    return norfair_detections
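
# Per-frame sketch tying detection, conversion, and tracking together
# (assumes `detector` and `tracker` were built as in the sketches above):
#
#     yolo_detections = detector(frame, image_size=640)
#     detections = yolo_detections_to_norfair_detections(yolo_detections, track_points="bbox")
#     tracked_objects = tracker.update(detections=detections)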

def clean_videos(path: str):
    # Remove videos generated by past runs
    files = glob.glob(f"{path}/*")
    for file in files:
        if file.endswith(".mp4"):
            os.remove(file)

def draw(
    paths_drawer,
    track_points,
    frame,
    detections,
    tracked_objects,
    coord_transformations,
    fix_paths,
):
    """Draw detections, tracked objects, and (optionally) their paths on the frame."""
    if track_points == "centroid":
        norfair.draw_points(frame, detections)
        norfair.draw_tracked_objects(frame, tracked_objects)
    elif track_points == "bbox":
        norfair.draw_boxes(frame, detections)
        norfair.draw_tracked_boxes(frame, tracked_objects)

    if fix_paths:
        # With camera motion compensation, paths are drawn in absolute coordinates
        frame = paths_drawer.draw(frame, tracked_objects, coord_transformations)
    elif paths_drawer is not None:
        frame = paths_drawer.draw(frame, tracked_objects)

    return frame
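
# End-to-end sketch of a processing loop built from the helpers above. The
# input path is a placeholder, and the Video/Paths usage follows norfair's
# standard API; this is an illustration, not the app's actual entry point:
#
#     video = norfair.Video(input_path="input.mp4")
#     tracker = norfair.Tracker(distance_function=iou, distance_threshold=DISTANCE_THRESHOLD_BBOX)
#     paths_drawer = norfair.Paths(center, attenuation=0.01)
#     detector = YOLO(models_path["YoloV7"])
#     for frame in video:
#         yolo_detections = detector(frame)
#         detections = yolo_detections_to_norfair_detections(yolo_detections, track_points="bbox")
#         tracked_objects = tracker.update(detections=detections)
#         frame = draw(paths_drawer, "bbox", frame, detections, tracked_objects, None, False)
#         video.write(frame)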