- handcrafted_solution.py +16 -21
- script.py +4 -4
handcrafted_solution.py
CHANGED
@@ -105,7 +105,7 @@ def clean_image(image_gestalt) -> np.ndarray:
     return image_gestalt
 
 
-def get_vertices(image_gestalt, *, color_range=
+def get_vertices(image_gestalt, *, color_range=3.5, dialations=2, erosions=1, kernel_size=11):
     ### detects the apex and eave end and flashing end points
     apex_mask = cv2.inRange(image_gestalt, apex_color - color_range, apex_color + color_range)
     eave_end_point_mask = cv2.inRange(image_gestalt, eave_end_point - color_range, eave_end_point + color_range)
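For context, these vertex masks come from cv2.inRange, which keeps pixels whose color falls within +/- color_range of a reference gestalt color, as in the two context lines above. A minimal sketch with a hypothetical apex color (the real color constants live elsewhere in handcrafted_solution.py):

    import cv2
    import numpy as np

    apex_color = np.array([235, 114, 113])  # hypothetical gestalt color
    color_range = 3.5

    image = np.full((32, 32, 3), 200, dtype=np.uint8)
    image[5, 5] = apex_color

    # pixels within +/- color_range of apex_color become 255, all others 0
    mask = cv2.inRange(image, apex_color - color_range, apex_color + color_range)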
@@ -172,7 +172,7 @@ def get_lines_and_directions(gest_seg_np, edge_class, *, color_range=4., rho, th
 
     # Run Hough on edge detected image
     # Output "lines" is an array containing endpoints of detected line segments
-    cv2.GaussianBlur(mask, (11, 11), 0, mask)
+    # cv2.GaussianBlur(mask, (11, 11), 0, mask)
     lines = cv2.HoughLinesP(mask, rho, theta, threshold, np.array([]),
                             min_line_length, max_line_gap)
 
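Disabling the blur feeds HoughLinesP a crisper mask. For reference, the probabilistic Hough transform returns one (x1, y1, x2, y2) endpoint row per detected segment; a self-contained sketch with a synthetic mask and hypothetical thresholds:

    import cv2
    import numpy as np

    mask = np.zeros((100, 100), dtype=np.uint8)
    cv2.line(mask, (10, 10), (90, 10), 255, 2)  # one synthetic edge

    # each returned row holds the endpoints of a detected line segment
    lines = cv2.HoughLinesP(mask, rho=1, theta=np.pi / 180, threshold=20,
                            minLineLength=30, maxLineGap=5)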
@@ -205,7 +205,7 @@ def infer_missing_vertices(ridge_edges, rake_edges):
     rake_ends = np.concatenate([rake_edges[:, 2:], rake_edges[:, :2]])
     ridge_ends = KDTree(ridge_ends)
     rake_ends = KDTree(rake_ends)
-    missing_candidates = rake_ends.query_ball_tree(ridge_ends,
+    missing_candidates = rake_ends.query_ball_tree(ridge_ends, 10)
     missing_candidates = np.concatenate([*missing_candidates])
     missing_candidates = np.unique(missing_candidates).astype(np.int32)
 
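query_ball_tree returns, for each point in the calling tree, the list of indices of points in the other tree within the given radius; flattening and deduplicating those lists yields the candidate set, exactly as the three lines above do. A minimal sketch with synthetic endpoints:

    import numpy as np
    from scipy.spatial import KDTree

    rake_ends = KDTree(np.array([[0., 0.], [100., 5.], [200., 0.]]))
    ridge_ends = KDTree(np.array([[3., 4.], [104., 5.], [500., 500.]]))

    # per rake end: indices of ridge ends within radius 10 -> [[0], [1], []]
    candidates = rake_ends.query_ball_tree(ridge_ends, 10)
    candidates = np.unique(np.concatenate([*candidates])).astype(np.int32)
    # -> array([0, 1], dtype=int32): ridge ends sitting near some rake end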
@@ -231,9 +231,6 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, *,
     # missed_vertices = get_missed_vertices(vertices, inferred_vertices, **kwargs)
     # vertices = np.concatenate([vertices, missed_vertices])
 
-    if len(vertices) < 2:
-        return [], []
-
 
     edges = []
     line_directions = []
@@ -272,6 +269,8 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, *,
     inferred_vertices = infer_missing_vertices(ridge_edges, rake_edges)
     missed_vertices = get_missed_vertices(vertices, inferred_vertices, **kwargs)
     vertices = np.concatenate([vertices, missed_vertices])
+    if len(vertices) < 2:
+        return [], []
 
     vertex_size = np.full(len(vertices), point_radius/2)
     apex_radii *= point_radius_scale
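Taken together with the previous hunk, this moves the len(vertices) < 2 early exit from before vertex inference to after it, so an image whose directly detected vertices are too few can still be rescued by the inferred and missed vertices before the function gives up.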
@@ -294,7 +293,9 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, *,
         # 'post',
         'valley',
         'hip',
-        'transition_line'
+        'transition_line',
+        'fascia',
+        'soffit',]:
         class_edges, class_directions = get_lines_and_directions(gest_seg_np, edge_class,
                                                                  rho=rho,
                                                                  theta=theta,
@@ -308,9 +309,10 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, *,
         line_directions.append(class_directions)
 
     edges = np.concatenate(edges).astype(np.float64)
-    line_directions = np.concatenate(line_directions).astype(np.float64)
     if len(edges) < 1:
         return [], []
+    line_directions = np.concatenate(line_directions).astype(np.float64)
+
     # calculate the distances between the vertices and the edge ends
 
     begin_edges = KDTree(edges[:, :2])
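The reordering here defers the line_directions concatenation until after the empty-edges guard, so the function can return early without doing work whose result would be discarded.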
@@ -395,7 +397,7 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, *,
 def get_uv_depth(vertices, depth):
     '''Get the depth of the vertices from the depth image'''
 
-
+    depth[depth > 3000] = np.nan
     uv = np.array([v['xy'] for v in vertices])
     uv_int = uv.astype(np.int32)
     H, W = depth.shape[:2]
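The new guard discards implausible depth readings by marking them NaN; the 3000 cutoff is the diff's own and presumably reflects the centimeter-scale depth maps used here. Note the array must hold floats for the NaN assignment to work, which it does after the division by scale_estimation_coefficient. A small illustration:

    import numpy as np

    depth = np.array([[250., 400., 9000.],
                      [310., 12000., 280.]])  # centimeters, float dtype
    depth[depth > 3000] = np.nan              # sensor outliers become NaN
    bad = np.isnan(depth)                     # downstream code can mask these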
@@ -440,10 +442,6 @@ def merge_vertices_3d(vert_edge_per_image, merge_th=0.1, **kwargs):
 
     all_3d_vertices = np.concatenate(all_3d_vertices, axis=0)
 
-    # dbscan = DBSCAN(eps=merge_th, min_samples=1).fit(all_3d_vertices)
-    # print(dbscan.core_sample_indices_)
-    # print(dbscan.labels_[dbscan.core_sample_indices_])
-    # print (connections_3d)
     distmat = cdist(all_3d_vertices, all_3d_vertices)
     types = np.array(types).reshape(-1, 1)
     same_types = cdist(types, types)
@@ -471,16 +469,13 @@ def merge_vertices_3d(vert_edge_per_image, merge_th=0.1, **kwargs):
         new_vertices.append(all_3d_vertices[idxs].mean(axis=0))
         for idx in idxs:
             old_idx_to_new[idx] = count
-    # print (connections_3d)
     new_vertices = np.array(new_vertices)
-    # print (connections_3d)
     for conn in connections_3d:
         new_con = sorted((old_idx_to_new[conn[0]], old_idx_to_new[conn[1]]))
         if new_con[0] == new_con[1]:
             continue
         if new_con not in new_connections:
             new_connections.append(new_con)
-    # print (f'{len(new_vertices)} left after merging {len(all_3d_vertices)} with {th=}')
     return new_vertices, new_connections
 
 
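merge_vertices_3d pairs a Euclidean distance matrix with a type-compatibility matrix: cdist on the 1-D type codes is zero exactly when two vertices share a type, so thresholding both merges only nearby, same-type vertices. A toy sketch of that matrix construction, with synthetic points and a hypothetical threshold:

    import numpy as np
    from scipy.spatial.distance import cdist

    pts = np.array([[0., 0., 0.], [0.05, 0., 0.], [5., 5., 5.]])
    types = np.array([0, 0, 1]).reshape(-1, 1)  # e.g. 0 = apex, 1 = eave end

    distmat = cdist(pts, pts)          # pairwise 3-D distances
    same_types = cdist(types, types)   # 0 exactly where type codes match
    mergeable = (distmat <= 0.1) & (same_types == 0)
    # mergeable[0, 1] is True: close together and of the same type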
@@ -515,9 +510,7 @@ def clean_points3d(entry, clustering_eps):
     point_keys = [k for k, v in entry["points3d"].items()]
     point_keys = np.array(point_keys)
 
-
-
-    clustered = DBSCAN(eps=clustering_eps, min_samples=10).fit(points).labels_
+    clustered = DBSCAN(eps=clustering_eps, min_samples=5).fit(points).labels_
     clustered_indices = np.argsort(clustered)
 
     points = points[clustered_indices]
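Lowering min_samples from 10 to 5 lets sparser point groups count as clusters instead of noise (DBSCAN assigns noise points the label -1). A minimal sketch of the labeling on synthetic points:

    import numpy as np
    from sklearn.cluster import DBSCAN

    rng = np.random.default_rng(0)
    blob = rng.normal(0, 1, size=(20, 3))          # a dense cluster
    stragglers = rng.uniform(50, 60, size=(3, 3))  # too few to form one
    points = np.vstack([blob, stragglers])

    labels = DBSCAN(eps=5, min_samples=5).fit(points).labels_
    # blob points share a label >= 0; the stragglers get -1 (noise)
    order = np.argsort(labels)  # sorting by label groups cluster members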
@@ -601,6 +594,8 @@ def predict(entry, visualize=False,
             vert_edge_per_image[i] = np.empty((0, 2)), [], np.empty((0, 3))
             continue
         depth_np = np.array(depthcm) / scale_estimation_coefficient
+        # kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
+        # depth_np = cv2.filter2D(depth_np, -1, kernel)
         uv, depth_vert_from_depth_map = get_uv_depth(vertices, depth_np)
         try:
             image = image_dict[imagekey]
@@ -624,7 +619,7 @@ def predict(entry, visualize=False,
             #Revert to the depthmap
             depthmap_used = True
 
-
+        # Normalize the uv to the camera intrinsics
 
         xy_local = np.ones((len(uv), 3))
         xy_local[:, 0] = (uv[:, 0] - K[0, 2]) / K[0, 0]
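The normalization the new comment refers to is standard pinhole back-projection: subtract the principal point and divide by focal length, giving a ray whose z component is 1. A sketch under that assumption, with hypothetical intrinsics:

    import numpy as np

    K = np.array([[1000.,    0., 960.],   # fx, cx (hypothetical)
                  [   0., 1000., 540.],   # fy, cy
                  [   0.,    0.,   1.]])
    uv = np.array([[960., 540.], [1200., 700.]])  # pixel coordinates

    rays = np.ones((len(uv), 3))
    rays[:, 0] = (uv[:, 0] - K[0, 2]) / K[0, 0]
    rays[:, 1] = (uv[:, 1] - K[1, 2]) / K[1, 1]
    # scaling each ray by its depth yields camera-space 3-D points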
@@ -657,7 +652,6 @@ def predict(entry, visualize=False,
         vertices_3d_local[depth_vert_nan_idxs] /= norm_factor_max
         vertices_3d_local[~np.isin(np.arange(len(vertices_3d_local)), depth_vert_nan_idxs)] /= norm_factor_min
 
-        # vertices_3d_local = depth_vert[..., None] * (xy_local / norm_factor)
         world_to_cam = np.eye(4)
         world_to_cam[:3, :3] = R
         world_to_cam[:3, 3] = t
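The 4x4 assembled here is the usual homogeneous world-to-camera matrix; assuming R and t follow that convention, inverting it maps camera-space vertices back into world space. A sketch with placeholder pose values:

    import numpy as np

    R = np.eye(3)               # placeholder rotation
    t = np.array([1., 2., 3.])  # placeholder translation

    world_to_cam = np.eye(4)
    world_to_cam[:3, :3] = R
    world_to_cam[:3, 3] = t

    cam_to_world = np.linalg.inv(world_to_cam)
    pts_cam = np.array([[0., 0., 5., 1.]])  # homogeneous camera-space point
    pts_world = (cam_to_world @ pts_cam.T).T[:, :3]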
@@ -674,6 +668,7 @@ def predict(entry, visualize=False,
     vert_edge_per_image[i] = vertices, connections, vertices_3d
     all_3d_vertices, connections_3d = merge_vertices_3d(vert_edge_per_image, **kwargs)
     all_3d_vertices_clean, connections_3d_clean = all_3d_vertices, connections_3d
+    # all_3d_vertices_clean, connections_3d_clean = prune_not_connected(all_3d_vertices, connections_3d)
     # highest_edges = np.argpartition(all_3d_vertices_clean[:, 1], 4)[:4].tolist()
     #
     # connections_3d_clean.append(highest_edges[:2])
script.py
CHANGED
@@ -142,13 +142,13 @@ if __name__ == "__main__":
         results = []
         for i, sample in enumerate(batch):
             results.append(pool.submit(predict, sample,
-                                       point_radius=
+                                       point_radius=45,
                                        max_angle=15,
                                        extend=25,
-                                       merge_th=
-                                       min_missing_distance=
+                                       merge_th=100.0,
+                                       min_missing_distance=1000.0,
                                        scale_estimation_coefficient=2.54,
-                                       clustering_eps=
+                                       clustering_eps=150,
                                        interpolation_radius=10000,
                                        point_radius_scale=0.5,
                                        # dist_coeff=0,
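For context, each sample is dispatched through an executor's submit(), which forwards the keyword arguments to predict and returns a future; collecting .result() later surfaces any per-sample exception. A stripped-down sketch using a thread pool and a hypothetical predict stub (the real script's executor type isn't shown in this hunk):

    from concurrent.futures import ThreadPoolExecutor

    def predict(sample, *, point_radius=45, merge_th=100.0, **kwargs):
        return sample, point_radius  # hypothetical stand-in for the real pipeline

    batch = ["a", "b", "c"]
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(predict, s, point_radius=45, merge_th=100.0)
                   for s in batch]
        results = [f.result() for f in futures]  # raises here if a worker failed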