firesnaker committed
Commit 0825a2d
1 Parent(s): b2b1277

Create app.py

Files changed (1)
  1. app.py +228 -0
app.py ADDED
@@ -0,0 +1,228 @@
+ '''
+ import gradio as gr
+
+ def greet(name):
+     return "Hello " + name + "!!"
+
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+ iface.launch()
+ '''
+
+ import os
+ HOME = os.getcwd()
+ print(HOME)
+
+ # Upload your own video
+ SOURCE_VIDEO_PATH = f"{HOME}/testing.mp4"
+
+ # Pip install method (recommended)
+
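+ # NOTE: the `!`, `%cd`, and `%matplotlib` lines below are IPython magics, so this
+ # file only runs in a notebook/Colab-style kernel despite being named app.py.
+ # A rough script-safe equivalent (a sketch, not part of the original notebook)
+ # would shell out instead, e.g.:
+ #   import subprocess
+ #   subprocess.run(["pip", "install", "ultralytics"], check=True)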
+ !pip install ultralytics
+
+ from IPython import display
+ display.clear_output()
+
+ import ultralytics
+ ultralytics.checks()
+
+ %cd {HOME}
+ !git clone https://github.com/ifzhang/ByteTrack.git
+ %cd {HOME}/ByteTrack
+
+ # workaround related to https://github.com/roboflow/notebooks/issues/80
+ !sed -i 's/onnx==1.8.1/onnx==1.9.0/g' requirements.txt
+
+ !pip3 install -q -r requirements.txt
+ !python3 setup.py -q develop
+ !pip install -q cython_bbox
+ !pip install -q onemetric
+ # workaround related to https://github.com/roboflow/notebooks/issues/112 and https://github.com/roboflow/notebooks/issues/106
+ !pip install -q loguru lap thop
+
+ from IPython import display
+ display.clear_output()
+
+
+ import sys
+ sys.path.append(f"{HOME}/ByteTrack")
+
+
+ import yolox
+ print("yolox.__version__:", yolox.__version__)
+
+ from yolox.tracker.byte_tracker import BYTETracker, STrack
+ from onemetric.cv.utils.iou import box_iou_batch
+ from dataclasses import dataclass
+
+
+ @dataclass(frozen=True)
+ class BYTETrackerArgs:
+     track_thresh: float = 0.25
+     track_buffer: int = 30
+     match_thresh: float = 0.8
+     aspect_ratio_thresh: float = 3.0
+     min_box_area: float = 1.0
+     mot20: bool = False
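+ # In ByteTrack, track_thresh is the detection-confidence threshold for tracking,
+ # track_buffer is how many frames a lost track is kept alive, and match_thresh is
+ # the matching tolerance; the values above appear to follow the defaults used in
+ # the Roboflow notebook this code is adapted from.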
+
+ !pip install supervision==0.1.0
+
+
+ from IPython import display
+ display.clear_output()
+
+
+ import supervision
+ print("supervision.__version__:", supervision.__version__)
+
+ from supervision.draw.color import ColorPalette
+ from supervision.geometry.dataclasses import Point
+ from supervision.video.dataclasses import VideoInfo
+ from supervision.video.source import get_video_frames_generator
+ from supervision.video.sink import VideoSink
+ from supervision.notebook.utils import show_frame_in_notebook
+ from supervision.tools.detections import Detections, BoxAnnotator
+ from supervision.tools.line_counter import LineCounter, LineCounterAnnotator
+
+ from typing import List
+
+ import numpy as np
+
+
+ # converts Detections into the format consumed by BYTETracker.update
+ def detections2boxes(detections: Detections) -> np.ndarray:
+     return np.hstack((
+         detections.xyxy,
+         detections.confidence[:, np.newaxis]
+     ))
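+ # detections2boxes yields an (N, 5) array of [x1, y1, x2, y2, confidence] rows,
+ # the layout BYTETracker.update() accepts as output_results in the loop below.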
+
+
+ # converts List[STrack] into format that can be consumed by match_detections_with_tracks function
+ def tracks2boxes(tracks: List[STrack]) -> np.ndarray:
+     return np.array([
+         track.tlbr
+         for track
+         in tracks
+     ], dtype=float)
+
+
+ # matches detections with tracks by IoU and returns one tracker id (or None) per detection
+ def match_detections_with_tracks(
+     detections: Detections,
+     tracks: List[STrack]
+ ) -> List:
+     if not np.any(detections.xyxy) or len(tracks) == 0:
+         return [None] * len(detections)
+
+     tracks_boxes = tracks2boxes(tracks=tracks)
+     iou = box_iou_batch(tracks_boxes, detections.xyxy)
+     track2detection = np.argmax(iou, axis=1)
+
+     tracker_ids = [None] * len(detections)
+
+     for tracker_index, detection_index in enumerate(track2detection):
+         if iou[tracker_index, detection_index] != 0:
+             tracker_ids[detection_index] = tracks[tracker_index].track_id
+
+     return tracker_ids
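+ # Matching here is greedy per track: each track claims the detection with the
+ # highest IoU, so a detection overlapped by several tracks keeps only the last
+ # tracker id written to its slot.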
+
+ # settings
+ MODEL = "yolov8x.pt"
+
+ from ultralytics import YOLO
+
+ model = YOLO(MODEL)
+ model.fuse()
+
+ # dict mapping class_id to class_name
+ CLASS_NAMES_DICT = model.model.names
+ # class_ids of interest - car, motorcycle, bus and truck (COCO indices)
+ CLASS_ID = [2, 3, 5, 7]
+
+ # create frame generator
+ generator = get_video_frames_generator(SOURCE_VIDEO_PATH)
+ # create instance of BoxAnnotator
+ box_annotator = BoxAnnotator(color=ColorPalette(), thickness=4, text_thickness=4, text_scale=2)
+ # acquire first video frame
+ iterator = iter(generator)
+ frame = next(iterator)
+ # model prediction on single frame and conversion to supervision Detections
+ results = model(frame)
+ detections = Detections(
+     xyxy=results[0].boxes.xyxy.cpu().numpy(),
+     confidence=results[0].boxes.conf.cpu().numpy(),
+     class_id=results[0].boxes.cls.cpu().numpy().astype(int)
+ )
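+ # Iterating a supervision 0.1.0 Detections object yields
+ # (xyxy, confidence, class_id, tracker_id) tuples, hence the 4-way unpacking below.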
+ # format custom labels
+ labels = [
+     f"{CLASS_NAMES_DICT[class_id]} {confidence:0.2f}"
+     for _, confidence, class_id, tracker_id
+     in detections
+ ]
+ # annotate and display frame
+ frame = box_annotator.annotate(frame=frame, detections=detections, labels=labels)
+
+ %matplotlib inline
+ show_frame_in_notebook(frame, (16, 16))
+
+ # settings
+ # set the start and end points of the counting line
+ LINE_START = Point(50, 430)
+ LINE_END = Point(1280-50, 430)
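+ # These coordinates assume a 1280-pixel-wide frame with a horizontal counting
+ # line at y=430; adjust them to match the resolution of SOURCE_VIDEO_PATH.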
+
+ TARGET_VIDEO_PATH = f"{HOME}/vehicle-counting-result.mp4"
+
+ # inspect source video properties (displayed automatically in a notebook cell)
+ VideoInfo.from_video_path(SOURCE_VIDEO_PATH)
+
+ from tqdm.notebook import tqdm
+
+
+ # create BYTETracker instance
+ byte_tracker = BYTETracker(BYTETrackerArgs())
+ # create VideoInfo instance
+ video_info = VideoInfo.from_video_path(SOURCE_VIDEO_PATH)
+ # create frame generator
+ generator = get_video_frames_generator(SOURCE_VIDEO_PATH)
+ # create LineCounter instance
+ line_counter = LineCounter(start=LINE_START, end=LINE_END)
+ # create instance of BoxAnnotator and LineCounterAnnotator
+ box_annotator = BoxAnnotator(color=ColorPalette(), thickness=4, text_thickness=4, text_scale=2)
+ line_annotator = LineCounterAnnotator(thickness=4, text_thickness=4, text_scale=2)
+
+ # open target video file
+ with VideoSink(TARGET_VIDEO_PATH, video_info) as sink:
+     # loop over video frames
+     for frame in tqdm(generator, total=video_info.total_frames):
+         # model prediction on single frame and conversion to supervision Detections
+         results = model(frame)
+         detections = Detections(
+             xyxy=results[0].boxes.xyxy.cpu().numpy(),
+             confidence=results[0].boxes.conf.cpu().numpy(),
+             class_id=results[0].boxes.cls.cpu().numpy().astype(int)
+         )
+         # filtering out detections with unwanted classes
+         mask = np.array([class_id in CLASS_ID for class_id in detections.class_id], dtype=bool)
+         detections.filter(mask=mask, inplace=True)
+         # tracking detections
+         tracks = byte_tracker.update(
+             output_results=detections2boxes(detections=detections),
+             img_info=frame.shape,
+             img_size=frame.shape
+         )
+         tracker_id = match_detections_with_tracks(detections=detections, tracks=tracks)
+         detections.tracker_id = np.array(tracker_id)
+         # filtering out detections without trackers
+         mask = np.array([tracker_id is not None for tracker_id in detections.tracker_id], dtype=bool)
+         detections.filter(mask=mask, inplace=True)
+         # format custom labels
+         labels = [
+             f"#{tracker_id} {CLASS_NAMES_DICT[class_id]} {confidence:0.2f}"
+             for _, confidence, class_id, tracker_id
+             in detections
+         ]
+         # updating line counter
+         line_counter.update(detections=detections)
+         # annotate frame and write it to the output video
+         frame = box_annotator.annotate(frame=frame, detections=detections, labels=labels)
+         line_annotator.annotate(frame=frame, line_counter=line_counter)
+         sink.write_frame(frame)
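+ # The in/out totals accumulated by line_counter are drawn onto each frame by
+ # line_annotator, and the annotated video is written to TARGET_VIDEO_PATH.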