Update app.py
app.py CHANGED
```diff
@@ -62,7 +62,8 @@ def compute_iou(boxA, boxB):
         return 0
     return interArea / float(boxAArea + boxBArea - interArea)
 
-def custom_nms(preds, iou_threshold=0.7):
+# Lowering the NMS threshold to 0.5 (from 0.7) to allow more distinct boxes when they are adjacent.
+def custom_nms(preds, iou_threshold=0.5):
     preds = sorted(preds, key=lambda x: x["confidence"], reverse=True)
     filtered_preds = []
     for pred in preds:
```
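For intuition on the new threshold: duplicate detections of the same box overlap almost completely, while genuinely adjacent boxes overlap only slightly, so an IoU cut-off of 0.5 discards near-duplicates that 0.7 let through. A minimal standalone sketch (the `compute_iou` helper below mirrors the corner-format one in app.py; the box coordinates are invented):

```python
# Corner-format IoU, mirroring the helper in app.py.
def compute_iou(boxA, boxB):
    xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    interArea = max(0, xB - xA) * max(0, yB - yA)
    boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    if interArea == 0:
        return 0
    return interArea / float(boxAArea + boxBArea - interArea)

# Hypothetical detections of 100x100 boxes:
print(compute_iou((0, 0, 100, 100), (10, 0, 110, 100)))  # ~0.82: suppressed at 0.5 and 0.7
print(compute_iou((0, 0, 100, 100), (25, 0, 125, 100)))  # 0.60: only the 0.5 threshold suppresses it
print(compute_iou((0, 0, 100, 100), (90, 0, 190, 100)))  # ~0.05: adjacent, kept either way
```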
```diff
@@ -75,13 +76,12 @@ def custom_nms(preds, iou_threshold=0.7):
             filtered_preds.append(pred)
     return filtered_preds
 
-#
-#
-# (modified to compute a conversion factor from the marker’s bounding box).
+# The process_image function merges robust box detection (with updated prediction parameters)
+# and ArUco marker detection with refined conversion factor computation.
 def process_image(job_id, image_path, object_type, multiplier):
     try:
         jobs[job_id]['progress'] = 10
-        # Load
+        # Load the original image
         image = cv2.imread(image_path)
         if image is None:
             jobs[job_id]['progress'] = 100
@@ -90,7 +90,7 @@ def process_image(job_id, image_path, object_type, multiplier):
 
         jobs[job_id]['progress'] = 20
         img_height, img_width = image.shape[:2]
-        #
+        # Set dynamic thickness based on image size and multiplier.
         thickness = max(2, int(min(img_width, img_height) / 300)) * multiplier
         detection_info = []
 
@@ -101,19 +101,19 @@ def process_image(job_id, image_path, object_type, multiplier):
             return
 
         # --- BOX DETECTION ---
-        # Upscale
+        # Upscale if image dimensions are small.
         scale_factor = 1
         if img_width < 1000 or img_height < 1000:
            scale_factor = 2
 
-        # Use improved
+        # Use improved parameters: confidence=50 and overlap=20.
         if scale_factor > 1:
             upscaled_image = cv2.resize(image, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
             temp_path = "upscaled.jpg"
             cv2.imwrite(temp_path, upscaled_image)
-            results = box_model.predict(temp_path, confidence=50, overlap=
+            results = box_model.predict(temp_path, confidence=50, overlap=20).json()
         else:
-            results = box_model.predict(image_path, confidence=50, overlap=
+            results = box_model.predict(image_path, confidence=50, overlap=20).json()
 
         predictions = results.get("predictions", [])
         processed_preds = []
```
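The `predict(...).json()` pattern matches the Roboflow Python SDK, where `confidence` and `overlap` are percentages from 0 to 100. A sketch of how `box_model` would be wired up under that assumption (the API key, workspace, project name, and version below are placeholders, not taken from this repo):

```python
# Assumed Roboflow SDK setup; every identifier below is a placeholder.
from roboflow import Roboflow

rf = Roboflow(api_key="YOUR_API_KEY")
box_model = rf.workspace().project("your-box-project").version(1).model

# confidence/overlap are percentages; overlap is Roboflow's server-side NMS
# threshold, applied before the custom client-side NMS above.
results = box_model.predict("image.jpg", confidence=50, overlap=20).json()
for p in results.get("predictions", []):
    print(p["x"], p["y"], p["width"], p["height"], p["confidence"])
```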
```diff
@@ -130,12 +130,12 @@ def process_image(job_id, image_path, object_type, multiplier):
                 width = prediction["width"]
                 height = prediction["height"]
 
-                # Convert from center
+                # Convert from center coordinates to corner coordinates.
                 x1 = int(round(x - width / 2))
                 y1 = int(round(y - height / 2))
                 x2 = int(round(x + width / 2))
                 y2 = int(round(y + height / 2))
-                # Clamp to image dimensions
+                # Clamp to image dimensions.
                 x1 = max(0, min(x1, img_width - 1))
                 y1 = max(0, min(y1, img_height - 1))
                 x2 = max(0, min(x2, img_width - 1))
@@ -148,16 +148,14 @@ def process_image(job_id, image_path, object_type, multiplier):
             except Exception as e:
                 continue
 
-        # Apply
-        box_detections = custom_nms(processed_preds, iou_threshold=0.7)
+        # Apply NMS with a lower IoU threshold (0.5) to separate adjacent boxes.
+        box_detections = custom_nms(processed_preds, iou_threshold=0.5)
         jobs[job_id]['progress'] = 60
 
         # --- ARUCO MARKER DETECTION & SIZE CONVERSION ---
-
-        marker_real_width_cm = 5.0
+        marker_real_width_cm = 5.0  # The printed marker is 5 cm x 5 cm.
         try:
             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-            # Use the DICT_6X6_250 dictionary (as in your current prompt)
             aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
             if hasattr(cv2.aruco, 'DetectorParameters_create'):
                 aruco_params = cv2.aruco.DetectorParameters_create()
```
|
|
167 |
if ids is not None and len(corners) > 0:
|
168 |
marker_corners = corners[0].reshape((4, 2))
|
169 |
cv2.aruco.drawDetectedMarkers(image, corners, ids)
|
170 |
-
# Compute the bounding box
|
171 |
min_x = np.min(marker_corners[:, 0])
|
172 |
max_x = np.max(marker_corners[:, 0])
|
173 |
min_y = np.min(marker_corners[:, 1])
|
@@ -175,7 +173,7 @@ def process_image(job_id, image_path, object_type, multiplier):
|
|
175 |
width_pixels = max_x - min_x
|
176 |
height_pixels = max_y - min_y
|
177 |
if width_pixels > 0 and height_pixels > 0:
|
178 |
-
# Use the average
|
179 |
conversion_factor = (marker_real_width_cm / width_pixels + marker_real_width_cm / height_pixels) / 2
|
180 |
else:
|
181 |
conversion_factor = None
|
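To make the conversion factor concrete: with the 5 cm marker spanning, say, 200 px wide and 196 px tall, the per-axis cm-per-pixel scales are averaged, and any detected box dimension in pixels multiplies through. All pixel values here are invented for illustration:

```python
marker_real_width_cm = 5.0
width_pixels, height_pixels = 200.0, 196.0  # hypothetical marker bounding box

# Average of the width and height conversion factors, as in process_image.
conversion_factor = (marker_real_width_cm / width_pixels
                     + marker_real_width_cm / height_pixels) / 2
print(round(conversion_factor, 6))                 # 0.025255 cm per pixel

box_width_px = 400                                 # hypothetical detected box width
print(round(box_width_px * conversion_factor, 2))  # ~10.1 cm
```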