Update app.py
app.py
CHANGED
@@ -0,0 +1,337 @@
import os
import cv2
import numpy as np
import base64
from flask import Flask, render_template_string, request, redirect, flash
import roboflow
import torch
from collections import Counter

app = Flask(__name__)
app.secret_key = 'your_secret_key'  # Replace with a secure secret key

#########################################
# 1. Initialize the Models
#########################################

# --- Roboflow Box Detection Model ---
API_KEY = "wLjPoPYaLmrqCIOFA0RH"  # Replace with your actual API key
PROJECT_ID = "base-model-box-r4suo-8lkk1-6dbqh"  # Replace with your Roboflow project ID
VERSION_NUMBER = "2"  # Replace with your trained model version number

rf = roboflow.Roboflow(api_key=API_KEY)
workspace = rf.workspace()
project = workspace.project(PROJECT_ID)
version = project.version(VERSION_NUMBER)
box_model = version.model  # This model is trained for detecting boxes

# --- YOLOv5 Pretrained Model for Persons & Cars ---
# Using Ultralytics YOLOv5s (pretrained) from Torch Hub
yolov5_model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
# We'll filter YOLO detections to only include persons and cars.
YOLO_FILTER_CLASSES = {"person", "car"}
#########################################
# 2. Helper Functions
#########################################

def compute_iou(boxA, boxB):
    # Intersection-over-union of two (x1, y1, x2, y2) boxes.
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    interWidth = max(0, xB - xA)
    interHeight = max(0, yB - yA)
    interArea = interWidth * interHeight
    boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    if boxAArea + boxBArea - interArea == 0:
        return 0
    return interArea / float(boxAArea + boxBArea - interArea)

def custom_nms(preds, iou_threshold=0.3):
    # Greedy non-maximum suppression: keep the highest-confidence prediction,
    # drop any later prediction that overlaps a kept one beyond the IoU threshold.
    preds = sorted(preds, key=lambda x: x["confidence"], reverse=True)
    filtered_preds = []
    for pred in preds:
        keep = True
        for kept in filtered_preds:
            if compute_iou(pred["box"], kept["box"]) > iou_threshold:
                keep = False
                break
        if keep:
            filtered_preds.append(pred)
    return filtered_preds
def process_image(image_path):
    """
    Process the uploaded image using both detection pipelines:
      (a) Box detection via Roboflow (with measurement using an ArUco marker).
      (b) YOLOv5 detection for persons and cars.
    Returns the annotated image and a list of detection info dictionaries.
    """
    image = cv2.imread(image_path)
    if image is None:
        return None, "Could not read the image."
    img_height, img_width = image.shape[:2]

    detection_info = []  # List to hold all detection results for display

    # --- (a) Roboflow Box Detection & Measurement ---
    results = box_model.predict(image_path, confidence=50, overlap=30).json()
    predictions = results.get("predictions", [])
    processed_preds = []
    for prediction in predictions:
        x, y, width, height = prediction["x"], prediction["y"], prediction["width"], prediction["height"]
        x1 = int(round(x - width / 2))
        y1 = int(round(y - height / 2))
        x2 = int(round(x + width / 2))
        y2 = int(round(y + height / 2))
        # Clamp coordinates to image dimensions
        x1 = max(0, min(x1, img_width - 1))
        y1 = max(0, min(y1, img_height - 1))
        x2 = max(0, min(x2, img_width - 1))
        y2 = max(0, min(y2, img_height - 1))
        processed_preds.append({
            "box": (x1, y1, x2, y2),
            "class": prediction["class"],
            "confidence": prediction["confidence"]
        })
    box_detections = custom_nms(processed_preds, iou_threshold=0.3)
    # Detect ArUco marker for measurement (only applicable for boxes)
    marker_real_width_cm = 10.0  # The marker is 10cm x 10cm
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
    aruco_params = cv2.aruco.DetectorParameters()
    corners, ids, _ = cv2.aruco.detectMarkers(gray, aruco_dict, parameters=aruco_params)
    if ids is not None and len(corners) > 0:
        marker_corners = corners[0].reshape((4, 2))
        cv2.aruco.drawDetectedMarkers(image, corners, ids)
        marker_width_pixels = np.linalg.norm(marker_corners[0] - marker_corners[1])
        marker_height_pixels = np.linalg.norm(marker_corners[1] - marker_corners[2])
        marker_pixel_size = (marker_width_pixels + marker_height_pixels) / 2.0
        conversion_factor = marker_real_width_cm / marker_pixel_size
    else:
        conversion_factor = None
    # Draw box detections and record measurement info (only for boxes)
    for pred in box_detections:
        x1, y1, x2, y2 = pred["box"]
        label = pred["class"]
        confidence = pred["confidence"]
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
        if conversion_factor is not None:
            box_width_pixels = x2 - x1
            box_height_pixels = y2 - y1
            box_width_cm = box_width_pixels * conversion_factor
            box_height_cm = box_height_pixels * conversion_factor
            size_text = f"{box_width_cm:.1f}x{box_height_cm:.1f} cm"
            detection_info.append({
                "class": label,
                "confidence": f"{confidence:.2f}",
                "width_cm": f"{box_width_cm:.1f}",
                "height_cm": f"{box_height_cm:.1f}"
            })
        else:
            size_text = ""
            detection_info.append({
                "class": label,
                "confidence": f"{confidence:.2f}",
                "width_cm": "N/A",
                "height_cm": "N/A"
            })
        text = f"{label} ({confidence:.2f}) {size_text}"
        (text_width, text_height), baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        cv2.rectangle(image, (x1, y1 - text_height - baseline - 5), (x1 + text_width, y1 - 5), (0, 255, 0), -1)
        cv2.putText(image, text, (x1, y1 - 5 - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
    # --- (b) YOLOv5 for Persons & Cars ---
    # Convert image to RGB for YOLO (it expects RGB)
    img_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    yolo_results = yolov5_model(img_rgb)
    df = yolo_results.pandas().xyxy[0]
    for _, row in df.iterrows():
        if row['name'] in YOLO_FILTER_CLASSES:
            xmin = int(row['xmin'])
            ymin = int(row['ymin'])
            xmax = int(row['xmax'])
            ymax = int(row['ymax'])
            conf = row['confidence']
            label = row['name']
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
            text = f"{label} ({conf:.2f})"
            (text_width, text_height), baseline = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            cv2.rectangle(image, (xmin, ymin - text_height - baseline - 5), (xmin + text_width, ymin - 5), (255, 0, 0), -1)
            cv2.putText(image, text, (xmin, ymin - 5 - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
            detection_info.append({
                "class": label,
                "confidence": f"{conf:.2f}",
                "width_cm": "N/A",
                "height_cm": "N/A"
            })

    # --- Build Top Summary Text ---
    detection_counts = Counter(det["class"] for det in detection_info)
    if detection_counts:
        top_text = ", ".join(f"{cls}: {count}" for cls, count in detection_counts.items())
        (info_width, info_height), info_baseline = cv2.getTextSize(top_text, cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        cv2.rectangle(image, (5, 5), (5 + info_width, 5 + info_height + info_baseline), (0, 255, 0), -1)
        cv2.putText(image, top_text, (5, 5 + info_height), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)

    return image, detection_info
#########################################
# 3. Flask Routes
#########################################

@app.route('/', methods=['GET', 'POST'])
def index():
    image_data = None
    detection_info = None
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        upload_path = "uploaded.jpg"
        file.save(upload_path)
        processed_image, detection_info = process_image(upload_path)
        if processed_image is None:
            # On failure, process_image returns an error message instead of a detection list,
            # so surface it and reset detection_info before rendering the template.
            flash(detection_info or "Error processing image.")
            detection_info = None
        else:
            retval, buffer = cv2.imencode('.jpg', processed_image)
            image_data = base64.b64encode(buffer).decode('utf-8')
        os.remove(upload_path)
    return render_template_string('''
    <!doctype html>
    <html>
      <head>
        <title>Multi-Detection & Measurement</title>
        <!-- Bootstrap CSS -->
        <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/css/bootstrap.min.css">
        <style>
          body {
            background-color: #f8f9fa;
            font-family: "Segoe UI", Tahoma, Geneva, Verdana, sans-serif;
          }
          .container {
            margin-top: 30px;
          }
          .header {
            text-align: center;
            margin-bottom: 30px;
          }
          .card {
            margin-bottom: 30px;
          }
          .result-img {
            width: 100%;
            border: 1px solid #ddd;
            padding: 5px;
          }
          .table-responsive {
            margin-top: 20px;
          }
          .footer {
            text-align: center;
            font-size: 0.9em;
            color: #777;
            margin-top: 30px;
          }
        </style>
      </head>
      <body>
        <div class="container">
          <h1 class="header">Multi-Detection & Measurement</h1>
          <!-- Upload Form -->
          <div class="card">
            <div class="card-body">
              <form method="post" enctype="multipart/form-data">
                <div class="form-group">
                  <label for="file">Choose an image to upload:</label>
                  <input type="file" class="form-control-file" name="file" accept="image/*" id="file">
                </div>
                <button type="submit" class="btn btn-primary">Upload</button>
              </form>
              {% with messages = get_flashed_messages() %}
                {% if messages %}
                  <div class="alert alert-danger mt-3">
                    <ul>
                      {% for message in messages %}
                        <li>{{ message }}</li>
                      {% endfor %}
                    </ul>
                  </div>
                {% endif %}
              {% endwith %}
            </div>
          </div>
          {% if image_data or detection_info %}
          <div class="row">
            <div class="col-md-8">
              <div class="card">
                <div class="card-header">
                  Processed Image
                </div>
                <div class="card-body">
                  <img src="data:image/jpeg;base64,{{ image_data }}" alt="Processed Image" class="result-img">
                </div>
              </div>
            </div>
            <div class="col-md-4">
              <div class="card">
                <div class="card-header">
                  Detection Results
                </div>
                <div class="card-body">
                  <p>Total Results: <strong>{{ detection_info|length }}</strong></p>
                  <div class="table-responsive">
                    <table class="table table-striped table-bordered">
                      <thead class="thead-dark">
                        <tr>
                          <th>#</th>
                          <th>Class</th>
                          <th>Confidence</th>
                          <th>Width (cm)</th>
                          <th>Height (cm)</th>
                        </tr>
                      </thead>
                      <tbody>
                        {% for det in detection_info %}
                        <tr>
                          <td>{{ loop.index }}</td>
                          <td>{{ det.class }}</td>
                          <td>{{ det.confidence }}</td>
                          <td>{{ det.width_cm }}</td>
                          <td>{{ det.height_cm }}</td>
                        </tr>
                        {% endfor %}
                      </tbody>
                    </table>
                  </div>
                </div>
              </div>
            </div>
          </div>
          {% endif %}
          <div class="footer">
            <p>© 2023 Multi-Detection App. All rights reserved.</p>
          </div>
        </div>
        <!-- Bootstrap JS and dependencies -->
        <script src="https://code.jquery.com/jquery-3.5.1.slim.min.js"></script>
        <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/umd/popper.min.js"></script>
        <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.5.2/js/bootstrap.min.js"></script>
      </body>
    </html>
    ''', image_data=image_data, detection_info=detection_info)
#########################################
# Run the App
#########################################

if __name__ == '__main__':
    app.run()
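For a quick local check of the upload route, a minimal client sketch is shown below. It assumes the app is running at Flask's default address (http://127.0.0.1:5000) and uses "sample.jpg" as a stand-in for any local test image; both are assumptions, not part of this commit.

# Minimal client sketch for exercising the "/" upload route.
# Assumptions: the app is running on Flask's default address, and
# "sample.jpg" stands in for any local test image.
import requests

with open("sample.jpg", "rb") as f:
    # The route reads the upload from the "file" multipart form field.
    response = requests.post("http://127.0.0.1:5000/", files={"file": f})

print(response.status_code)  # 200 on success; the returned HTML embeds the annotated image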