limitedonly41 committed on
Commit
79df048
·
verified ·
1 Parent(s): d4518e4

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +126 -0
app.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import torch
import gradio as gr
from ultralytics import YOLO
import time

# Pick the compute device: prefer Apple's Metal backend (MPS) when this
# torch build supports it, otherwise fall back to the CPU.
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
    device = torch.device('mps')
    print("MPS available, using MPS.")
else:
    device = torch.device('cpu')
    print("MPS not available, using CPU.")

# YOLOv8 nano checkpoint, moved onto the selected device.
model = YOLO("yolov8n.pt").to(device)

# COCO class indices this app counts: 0 = person, 2 = car.
classes_to_count = [0, 2]  # person and car classes for counting

# Tracker IDs observed so far, one set per counted class.
# Reset at the start of every processed video.
unique_people_ids = set()
unique_car_ids = set()
25
def process_video(video_input):
    """Run YOLOv8 tracking over a video and count unique people and cars.

    Reads the video at *video_input*, runs detection+tracking on every
    ``frame_skip``-th frame, accumulates unique tracker IDs into the
    module-level ``unique_people_ids`` / ``unique_car_ids`` sets (reset on
    each call), and overlays the running totals on each processed frame.

    Args:
        video_input: Path to the input video file.

    Returns:
        list: Annotated BGR frames (one per processed frame).

    Raises:
        ValueError: If the video cannot be opened.
    """
    global unique_people_ids, unique_car_ids
    unique_people_ids = set()
    unique_car_ids = set()

    # Open the input video.
    cap = cv2.VideoCapture(video_input)
    # Raise instead of assert: asserts are stripped under `python -O`.
    if not cap.isOpened():
        raise ValueError("Error reading video file")

    # Annotated frames collected as we go.
    output_frames = []

    frame_counter = 0
    frame_skip = 5  # Process every 5th frame to keep processing fast.

    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break

        if frame_counter % frame_skip != 0:
            frame_counter += 1
            continue

        # Run object detection and tracking on the frame. persist=True keeps
        # tracker state (and therefore IDs) stable across frames.
        results = model.track(frame, persist=True, device=device,
                              classes=classes_to_count, verbose=False, conf=0.4)

        # Record the tracker ID of each detection, once per unique object.
        for det in results[0].boxes:
            # Reset per detection so a failed extraction can never reuse a
            # stale ID from the previous iteration (the original bug).
            object_id = None
            try:
                object_id = int(det.id[0])
            except (TypeError, IndexError):
                # det.id is None (tracker assigned no ID) or empty.
                pass
            if object_id is None:
                continue  # Skip objects without an ID

            cls_id = int(det.cls[0])
            if cls_id == 0:  # person class
                unique_people_ids.add(object_id)
            elif cls_id == 2:  # car class
                unique_car_ids.add(object_id)

        # Annotate the frame with the detections drawn by ultralytics.
        annotated_frame = results[0].plot()

        # Overlay the running unique counts.
        cv2.putText(annotated_frame,
                    f'Unique People: {len(unique_people_ids)} | Unique Cars: {len(unique_car_ids)}',
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        output_frames.append(annotated_frame)
        frame_counter += 1

    cap.release()

    return output_frames
97
+
98
def video_pipeline(video_file):
    """Process an uploaded video and encode the annotated frames to MP4.

    Args:
        video_file: Path to the uploaded input video.

    Returns:
        str: Path to the encoded output video ('output.mp4').

    Raises:
        ValueError: If no frames could be read from the input video.
    """
    # Detect/track and annotate the individual frames.
    output_frames = process_video(video_file)

    # Guard the empty case: indexing output_frames[0] below would otherwise
    # raise a bare IndexError on an unreadable/zero-frame video.
    if not output_frames:
        raise ValueError("No frames could be processed from the input video")

    # Encode the frames back into a video.
    output_video_path = 'output.mp4'
    h, w, _ = output_frames[0].shape
    out = cv2.VideoWriter(output_video_path, cv2.VideoWriter_fourcc(*'mp4v'), 20, (w, h))
    # try/finally so the writer is released (file finalized) even if a
    # write fails mid-way.
    try:
        for frame in output_frames:
            out.write(frame)
    finally:
        out.release()

    return output_video_path
111
+
112
# Gradio Interface
title = "YOLOv8 Object Tracking with Unique ID Counting"
description = "Upload a video to detect and count unique people and cars using YOLOv8."

# NOTE: live=True was removed — it makes Gradio re-run the expensive
# full-video pipeline automatically on every input change. The default
# submit-button flow is the right fit for a long-running job like this.
interface = gr.Interface(
    fn=video_pipeline,
    inputs=gr.Video(label="Input Video"),
    outputs=gr.Video(label="Processed Video"),
    title=title,
    description=description,
)

# Launch Gradio interface
interface.launch()