Update app.py
app.py CHANGED
@@ -62,8 +62,8 @@ def compute_iou(boxA, boxB):
         return 0
     return interArea / float(boxAArea + boxBArea - interArea)
 
-#
-def custom_nms(preds, iou_threshold=0.5):
+# Lower the NMS threshold to 0.3 so that adjacent boxes are less likely to be merged.
+def custom_nms(preds, iou_threshold=0.3):
     preds = sorted(preds, key=lambda x: x["confidence"], reverse=True)
     filtered_preds = []
     for pred in preds:
@@ -76,8 +76,10 @@ def custom_nms(preds, iou_threshold=0.5):
             filtered_preds.append(pred)
     return filtered_preds
 
-# The process_image function
-#
+# The process_image function now uses:
+# - Roboflow prediction parameters: confidence=50 and a lower overlap=10.
+# - A custom NMS with IoU threshold of 0.3.
+# - ArUco marker detection for conversion factor computation.
 def process_image(job_id, image_path, object_type, multiplier):
     try:
         jobs[job_id]['progress'] = 10
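The loop body that decides whether `pred` survives (lines 70-75) sits between these two hunks and is not shown. Below is a minimal sketch of how the greedy suppression plausibly fits together, assuming each prediction carries a `"box"` tuple of `(x1, y1, x2, y2)` corners; the `"box"` key and the exact `compute_iou` wiring are assumptions, not the committed code:

```python
def compute_iou(boxA, boxB):
    # Standard intersection-over-union for (x1, y1, x2, y2) corner boxes;
    # consistent with the two return statements visible in the diff.
    xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    interArea = max(0, xB - xA) * max(0, yB - yA)
    boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    if interArea == 0:
        return 0
    return interArea / float(boxAArea + boxBArea - interArea)

def custom_nms(preds, iou_threshold=0.3):
    # Greedy NMS: walk predictions from most to least confident and keep a
    # box only if it does not overlap an already-kept box too strongly.
    preds = sorted(preds, key=lambda x: x["confidence"], reverse=True)
    filtered_preds = []
    for pred in preds:
        # Assumed check: compare against every kept box with compute_iou.
        if all(compute_iou(pred["box"], kept["box"]) <= iou_threshold
               for kept in filtered_preds):
            filtered_preds.append(pred)
    return filtered_preds
```

Note the direction of the change: at 0.3 a detection is discarded at lower mutual overlap than at 0.5, so near-duplicate detections of the same physical box collapse to a single box more aggressively.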
@@ -101,19 +103,19 @@ def process_image(job_id, image_path, object_type, multiplier):
             return
 
         # --- BOX DETECTION ---
-        # Upscale if image
+        # Upscale if the image is small.
         scale_factor = 1
         if img_width < 1000 or img_height < 1000:
             scale_factor = 2
 
-        # Use improved parameters: confidence=50 and overlap=
+        # Use improved parameters: confidence=50 and overlap=10 (lowered overlap).
         if scale_factor > 1:
             upscaled_image = cv2.resize(image, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
             temp_path = "upscaled.jpg"
             cv2.imwrite(temp_path, upscaled_image)
-            results = box_model.predict(temp_path, confidence=50, overlap=
+            results = box_model.predict(temp_path, confidence=50, overlap=10).json()
         else:
-            results = box_model.predict(image_path, confidence=50, overlap=
+            results = box_model.predict(image_path, confidence=50, overlap=10).json()
 
         predictions = results.get("predictions", [])
         processed_preds = []
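For reference, the `.json()` payload from a hosted Roboflow model is a plain dict; the sketch below shows only the keys this code actually reads, with made-up values (the coordinates are center-based, as the conversion in the next hunk confirms). Also note that when the upscaled temp image is sent, the returned pixel coordinates are in the upscaled frame; the lines between this hunk and the next are not shown, so any division by `scale_factor` would live there.

```python
# Illustrative shape of `results` after box_model.predict(...).json();
# values are made up, and keys beyond those read by app.py are omitted.
results = {
    "predictions": [
        {
            "x": 212.5,          # box center x, in pixels of the image sent
            "y": 148.0,          # box center y
            "width": 85.0,
            "height": 60.0,
            "confidence": 0.87,  # score used by the NMS sort
            "class": "box",
        },
    ],
}
```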
@@ -130,12 +132,12 @@ def process_image(job_id, image_path, object_type, multiplier):
                 width = prediction["width"]
                 height = prediction["height"]
 
-                # Convert
+                # Convert center-based coordinates to corner-based bounding box.
                 x1 = int(round(x - width / 2))
                 y1 = int(round(y - height / 2))
                 x2 = int(round(x + width / 2))
                 y2 = int(round(y + height / 2))
-                # Clamp
+                # Clamp coordinates within the image.
                 x1 = max(0, min(x1, img_width - 1))
                 y1 = max(0, min(y1, img_height - 1))
                 x2 = max(0, min(x2, img_width - 1))
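A quick worked example of the center-to-corner conversion and the clamp, with made-up numbers for a box that pokes past the right edge of a 100x100 image:

```python
# Made-up numbers: a 100x100 image, box centered near the right edge.
img_width, img_height = 100, 100
x, y, width, height = 90.0, 50.0, 30.0, 20.0

x1 = int(round(x - width / 2))   # 75
y1 = int(round(y - height / 2))  # 40
x2 = int(round(x + width / 2))   # 105 -> clamped below
y2 = int(round(y + height / 2))  # 60

x1 = max(0, min(x1, img_width - 1))   # 75
y1 = max(0, min(y1, img_height - 1))  # 40
x2 = max(0, min(x2, img_width - 1))   # 99 (pulled back inside the image)
y2 = max(0, min(y2, img_height - 1))  # 60
```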
@@ -148,8 +150,8 @@ def process_image(job_id, image_path, object_type, multiplier):
             except Exception as e:
                 continue
 
-        # Apply NMS with
-        box_detections = custom_nms(processed_preds, iou_threshold=0.
+        # Apply custom NMS with an IoU threshold of 0.3.
+        box_detections = custom_nms(processed_preds, iou_threshold=0.3)
         jobs[job_id]['progress'] = 60
 
         # --- ARUCO MARKER DETECTION & SIZE CONVERSION ---
@@ -173,7 +175,7 @@ def process_image(job_id, image_path, object_type, multiplier):
             width_pixels = max_x - min_x
             height_pixels = max_y - min_y
             if width_pixels > 0 and height_pixels > 0:
-                # Use the average
+                # Use the average conversion factor from width and height.
                 conversion_factor = (marker_real_width_cm / width_pixels + marker_real_width_cm / height_pixels) / 2
             else:
                 conversion_factor = None
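The marker detection that produces `min_x`/`max_x`/`min_y`/`max_y` falls outside the diff (lines 158-174 of the new file). Here is a minimal sketch of how those pixel extents are typically obtained with OpenCV's ArUco module, assuming the OpenCV 4.7+ `ArucoDetector` API and a `DICT_4X4_50` dictionary; both are assumptions, and the committed code may differ:

```python
import cv2

def marker_pixel_extents(image):
    # Detect the first ArUco marker and return the bounding extents of its
    # four corners in pixel coordinates, or None if no marker is found.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
    detector = cv2.aruco.ArucoDetector(dictionary, cv2.aruco.DetectorParameters())
    corners, ids, _rejected = detector.detectMarkers(gray)
    if ids is None or len(corners) == 0:
        return None
    pts = corners[0].reshape(-1, 2)  # 4 corner points of the first marker
    min_x, min_y = pts.min(axis=0)
    max_x, max_y = pts.max(axis=0)
    return min_x, min_y, max_x, max_y
```

Averaging the width- and height-based factors, as the committed line does, partially compensates for a marker viewed at a slight angle, where the two pixel extents differ.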
@@ -244,7 +246,7 @@ def process_image(job_id, image_path, object_type, multiplier):
             jobs[job_id]['result'] = {"error": "Error during YOLOv5 inference."}
             return
 
-        #
+        # Draw summary text on the image
         detection_counts = Counter(det["class"] for det in detection_info)
         if detection_counts:
             top_text = ", ".join(f"{cls}: {count}" for cls, count in detection_counts.items())
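The drawing call itself falls outside the hunk, so the following is only a sketch of one common way to render the summary with OpenCV; the stand-in canvas and detections, position, font, scale, and two-pass outline are placeholder choices, not values from app.py:

```python
from collections import Counter

import cv2
import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)        # stand-in canvas
detection_info = [{"class": "box"}, {"class": "box"}]  # stand-in detections

detection_counts = Counter(det["class"] for det in detection_info)
if detection_counts:
    top_text = ", ".join(f"{cls}: {count}" for cls, count in detection_counts.items())
    # Two passes: a thick dark stroke, then thinner light text for contrast.
    cv2.putText(image, top_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 0), 4)
    cv2.putText(image, top_text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)
```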