colin1842 committed
Commit f89da73 · verified · Parent(s): c473808

Upload 3 files

Files changed (3):
  1. feature_solution.py +8 -4
  2. loftr_outdoor.ckpt +3 -0
  3. script.py +7 -0
feature_solution.py CHANGED
@@ -338,6 +338,10 @@ def prune_not_connected(all_3d_vertices, connections_3d):
 
     return np.array(new_verts), connected_out
 
+checkpoint_path = "loftr_outdoor.ckpt"
+loftr_model = KF.LoFTR(pretrained=None)
+loftr_model.load_state_dict(torch.load(checkpoint_path)['state_dict'])
+
 def loftr_matcher(gestalt_img_0, gestalt_img1, depth_images):
     import torchvision.transforms as transforms
     rgb_to_gray = transforms.Compose([
@@ -346,7 +350,7 @@ def loftr_matcher(gestalt_img_0, gestalt_img1, depth_images):
         transforms.ToTensor() # Convert back to tensor
     ])
 
-    device = 'cpu'#torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    device = 'cpu' #torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
     w, h = depth_images.size
     gest_seg_0 = gestalt_img_0.resize(depth_images.size)
@@ -361,7 +365,7 @@ def loftr_matcher(gestalt_img_0, gestalt_img1, depth_images):
     gest_seg_1_tensor = K.image_to_tensor(gest_seg_1_np, False).float().to(device)
     img2 = K.geometry.resize(gest_seg_1_tensor, (int(h/4), int(w/4))) / 255
 
-    matcher = KF.LoFTR(pretrained="outdoor").to(device)
+    # matcher = KF.LoFTR(pretrained="outdoor").to(device)
 
     input_dict = {
         "image0": img1,
@@ -370,7 +374,7 @@ def loftr_matcher(gestalt_img_0, gestalt_img1, depth_images):
     # print("Input dict shape", input_dict["image0"].shape, input_dict["image1"].shape)
 
     with torch.no_grad():
-        correspondences = matcher(input_dict)
+        correspondences = loftr_model(input_dict)
 
     # mkpts0 = correspondences["keypoints0"].cpu().numpy()
     # mkpts1 = correspondences["keypoints1"].cpu().numpy()
@@ -620,7 +624,7 @@ def predict(entry, visualize=False) -> Tuple[np.ndarray, List[int]]:
         # save_image_with_keypoints(f'keypoints_{j}.png', np.array(good_entry['gestalt'][j]), mkpts_filtered_1, (255, 0, 0))
 
         # Line matching
-        line_0, line_1 = line_matcher(good_entry['gestalt'][i], good_entry['gestalt'][j], good_entry['depthcm'][i])
+        # line_0, line_1 = line_matcher(good_entry['gestalt'][i], good_entry['gestalt'][j], good_entry['depthcm'][i])
         # save_image_with_lines(f'line_{i}.png', np.array(good_entry['gestalt'][i]), line_0, (255, 0, 0))
         # save_image_with_lines(f'line_{j}.png', np.array(good_entry['gestalt'][j]), line_1, (255, 0, 0))
 
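The substance of this change: LoFTR is now instantiated once at module level with pretrained=None and its weights loaded from the bundled loftr_outdoor.ckpt, instead of downloading the "outdoor" weights inside loftr_matcher on every call (line matching is disabled in the same commit). Below is a minimal standalone sketch of the new path, including the quarter-resolution grayscale preprocessing the matcher applies; map_location="cpu", eval(), and the match_pair wrapper are illustrative additions here, not part of the commit.

import numpy as np
import torch
import kornia as K
import kornia.feature as KF
from PIL import Image

# Build LoFTR without downloading weights, then load the local checkpoint.
# map_location="cpu" and eval() are additions (not in the diff) so the
# sketch also works on CPU-only machines.
loftr_model = KF.LoFTR(pretrained=None)
state = torch.load("loftr_outdoor.ckpt", map_location="cpu")
loftr_model.load_state_dict(state["state_dict"])
loftr_model.eval()

def match_pair(img0: Image.Image, img1: Image.Image):
    # Mirror the matcher's preprocessing: grayscale, quarter resolution,
    # values scaled to [0, 1], shaped 1x1xHxW as LoFTR expects.
    w, h = img0.size
    t0 = K.image_to_tensor(np.array(img0.convert("L")), False).float()
    t1 = K.image_to_tensor(np.array(img1.convert("L")), False).float()
    t0 = K.geometry.resize(t0, (int(h / 4), int(w / 4))) / 255
    t1 = K.geometry.resize(t1, (int(h / 4), int(w / 4))) / 255
    with torch.no_grad():
        out = loftr_model({"image0": t0, "image1": t1})
    return out["keypoints0"].cpu().numpy(), out["keypoints1"].cpu().numpy()

Moving the model out of the function also means the checkpoint is read once per process rather than once per image pair.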
loftr_outdoor.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21f5bec5968178e8bc8b7633441836fe5de4f47d861dd2cd7dc38e271b0479ec
+size 46341978
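loftr_outdoor.ckpt is tracked with Git LFS, so only the pointer file appears in the diff; the actual ~46 MB checkpoint is fetched on checkout. A small sanity check, assuming the file sits in the working directory and using the oid and size recorded in the pointer above, catches the common failure mode of ending up with the pointer instead of the weights:

import hashlib

# Values copied from the LFS pointer file in this commit.
EXPECTED_SHA256 = "21f5bec5968178e8bc8b7633441836fe5de4f47d861dd2cd7dc38e271b0479ec"
EXPECTED_SIZE = 46341978

with open("loftr_outdoor.ckpt", "rb") as f:
    data = f.read()
assert len(data) == EXPECTED_SIZE, "size mismatch -- did git-lfs fetch run?"
assert hashlib.sha256(data).hexdigest() == EXPECTED_SHA256, "sha256 mismatch"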
script.py CHANGED
@@ -205,6 +205,13 @@ if __name__ == "__main__":
             'wf_vertices': pred_vertices.tolist(),
             'wf_edges': pred_edges
         })
+    # for i, sample in enumerate(tqdm(dataset)):
+    #     key, pred_vertices, pred_edges = predict(sample, visualize=False)
+    #     solution.append({
+    #         '__key__': key,
+    #         'wf_vertices': pred_vertices.tolist(),
+    #         'wf_edges': pred_edges
+    #     })
         if i % 100 == 0:
             # incrementally save the results in case we run out of time
             print(f"Processed {i} samples")
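The script.py hunk only comments out what appears to be a duplicated prediction loop; the live loop above it is untouched. For reference, here is that loop reassembled from the context lines, with dataset and predict stubbed so the sketch runs standalone (in the repo they come from the dataset loader and feature_solution.predict):

import numpy as np
from tqdm import tqdm

def predict(sample, visualize=False):  # stub standing in for feature_solution.predict
    return sample["__key__"], np.zeros((0, 3)), []

dataset = [{"__key__": f"sample_{k}"} for k in range(3)]  # stub dataset

solution = []
for i, sample in enumerate(tqdm(dataset)):
    key, pred_vertices, pred_edges = predict(sample, visualize=False)
    solution.append({
        '__key__': key,
        'wf_vertices': pred_vertices.tolist(),
        'wf_edges': pred_edges,
    })
    if i % 100 == 0:
        # incrementally save the results in case we run out of time
        print(f"Processed {i} samples")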