ostapagon committed
Commit 58ec876 · 1 Parent(s): 93cef7d

Change style a bit, add new whl. Add examples.

app.py CHANGED
@@ -2,31 +2,25 @@ import sys
 sys.path.append('wild-gaussian-splatting/mast3r/')
 sys.path.append('demo/')
 
-import os
-os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
-
-
 import gradio as gr
 import torch
 from mast3r.demo import get_args_parser
 from mast3r_demo import mast3r_demo_tab
 from gs_demo import gs_demo_tab
 
-torch.backends.cuda.matmul.allow_tf32 = True
-
 if __name__ == '__main__':
     with gr.Blocks() as demo:
         gr.HTML('''
            <div style="text-align: center; padding: 20px; background-color: #f9f9f9; border-radius: 10px; box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);">
-               <h2 style="color: #333;">MASt3R and 3DGS Pipeline Demo</h2>
-               <p style="font-size: 16px; color: #555;">This pipeline is designed for 3D reconstruction using MASt3R and 3DGS.</p>
-               <p style="font-size: 16px; color: #555;">The process is divided into two stages:</p>
-               <ol style="text-align: left; display: inline-block; margin: 0 auto; color: #555;">
+               <h2>MASt3R and 3DGS Pipeline Demo</h2>
+               <p style="font-size: 16px;">This pipeline is designed for 3D reconstruction using MASt3R and 3DGS.</p>
+               <p style="font-size: 16px;">The process is divided into two stages:</p>
+               <ol style="text-align: left; display: inline-block; margin: 0 auto;">
                    <li>MASt3R is used to obtain the initial point cloud and camera parameters.</li>
                    <li>3DGS is then trained on the results from MASt3R to refine the 3D scene representation.</li>
                </ol>
-               <p style="font-size: 16px; color: #555;">For a full version of this pipeline, please visit the repository at:</p>
-               <a href="https://github.com/nerlfield/wild-gaussian-splatting" target="_blank" style="font-size: 16px; color: #007bff; text-decoration: none;">nerlfield/wild-gaussian-splatting</a>
+               <p style="font-size: 16px;">For a full version of this pipeline, please visit the repository at:</p>
+               <a href="https://github.com/nerlfield/wild-gaussian-splatting" target="_blank" style="font-size: 16px; text-decoration: none;">nerlfield/wild-gaussian-splatting</a>
            </div>
        ''')
 
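The hunk above only covers the header of app.py; the tab wiring that follows is outside this diff. For orientation, a minimal sketch of how the two imported tab builders could be assembled (the `gr.Tab` labels and the `demo.launch()` call are assumptions, not taken from this commit):

```python
import sys
sys.path.append('wild-gaussian-splatting/mast3r/')
sys.path.append('demo/')

import gradio as gr
from mast3r_demo import mast3r_demo_tab
from gs_demo import gs_demo_tab

if __name__ == '__main__':
    with gr.Blocks() as demo:
        # Stage 1: MASt3R produces the initial point cloud and camera parameters
        with gr.Tab("MASt3R"):
            mast3r_demo_tab()
        # Stage 2: 3DGS trains on the MASt3R output to refine the scene
        with gr.Tab("3DGS"):
            gs_demo_tab()
    demo.launch()
```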
 
demo/gs_demo.py CHANGED
@@ -6,7 +6,8 @@ from demo_globals import CACHE_PATH, MODEL, DEVICE, SILENT, DATASET_DIR
 
 def get_dataset_folders(datasets_path):
     try:
-        return [f for f in os.listdir(datasets_path) if os.path.isdir(os.path.join(datasets_path, f))]
+        folders = [f for f in os.listdir(datasets_path) if os.path.isdir(os.path.join(datasets_path, f))]
+        return sorted(folders, key=lambda x: int(x.split('_')[-1]) if x.split('_')[-1].isdigit() else float('inf'))
     except FileNotFoundError:
         return []
 
@@ -38,7 +39,7 @@ def gs_demo_tab():
 
     # Instructions
     gr.Markdown('''
-    <div style="padding: 10px; background-color: #e9f7ef; border-radius: 5px; margin-bottom: 10px;">
+    <div style="padding: 10px; border-radius: 5px; margin-bottom: 10px;">
        <h3>Instructions for 3DGS Demo</h3>
        <ul style="text-align: left; color: #333;">
            <li>Make sure to press "Refresh Datasets" to obtain an updated list of datasets from Stage 1. They are in the format run_0, run_1, run_...</li>
@@ -58,10 +59,9 @@ def gs_demo_tab():
        print("update_dataset_dropdown, cache_path", CACHE_PATH)
        # Update the dataset folders list
        dataset_folders = get_dataset_folders(dataset_path)
-       # dataset_folders = "/app/data/scenes/"
        print("dataset_folders", dataset_folders)
-       # Only set a default value if there are folders available
-       default_value = dataset_folders[0] if dataset_folders else None
+       # Set the default value to the last run if there are folders available
+       default_value = dataset_folders[-1] if dataset_folders else None
        return gr.Dropdown(label="Select Dataset", choices=dataset_folders, value=default_value)
 
    # Set the update function to be called when the refresh button is clicked
@@ -88,7 +88,7 @@ def gs_demo_tab():
        densify_from_iter = gr.Number(label="Densify From Iter", value=500)
        densify_until_iter = gr.Number(label="Densify Until Iter", value=15000)
        densify_grad_threshold = gr.Number(label="Densify Grad Threshold", value=0.0002)
-       iterations = gr.Slider(label="Iterations", value=7000, minimum=1, maximum=15000, step=5)
+       iterations = gr.Slider(label="Iterations", value=9000, minimum=1, maximum=15000, step=5)
 
        start_button = gr.Button("Start Training")
 
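The reworked get_dataset_folders sorts run directories by their numeric suffix, so run_10 lands after run_9 rather than between run_1 and run_2, and the dropdown now defaults to the last entry, i.e. the newest run. A standalone sketch of that ordering (the folder names below are invented for illustration):

```python
# Same sort key as in the diff above; names that do not end in a number
# sort to the end via float('inf').
def sort_runs(folders):
    return sorted(
        folders,
        key=lambda x: int(x.split('_')[-1]) if x.split('_')[-1].isdigit() else float('inf'),
    )

runs = ["run_10", "run_2", "run_0", "scratch"]
print(sort_runs(runs))  # ['run_0', 'run_2', 'run_10', 'scratch']
# With purely numeric run_N names, the last element of the sorted list is the
# latest run, which is what the dropdown now uses as its default value.
```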
 
demo/gs_train.py CHANGED
@@ -51,7 +51,7 @@ class ModelParams:
     data_device: str = "cuda"
     eval: bool = False
 
-@spaces.GPU(duration=20)
+@spaces.GPU(duration=30)
 def train(
     data_source_path, iterations, position_lr_init, position_lr_final, position_lr_delay_mult,
     position_lr_max_steps, feature_lr, opacity_lr, scaling_lr, rotation_lr,
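The only change here raises the ZeroGPU time budget for the training entry point from 20 to 30 seconds. As a usage note, the decorator can be made optional for local runs; a sketch assuming the standard `spaces` package (the try/except shim is not part of this commit):

```python
# Optional-GPU shim: request a ZeroGPU allocation on HF Spaces, run plainly elsewhere.
try:
    import spaces
    gpu = spaces.GPU(duration=30)  # request up to ~30 s of GPU time per call
except ImportError:                # e.g. a local machine without the spaces package
    def gpu(fn):
        return fn

@gpu
def train(*args, **kwargs):
    ...  # training body as in gs_train.py
```
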
demo/mast3r_demo.py CHANGED
@@ -25,13 +25,15 @@ from mast3r.cloud_opt.tsdf_optimizer import TSDFPostProcess
 from mast3r.model import AsymmetricMASt3R
 from dust3r.image_pairs import make_pairs
 from dust3r.utils.image import load_images
-from dust3r.utils.device import to_numpy
 from dust3r.viz import add_scene_cam, CAM_COLORS, OPENGL, pts3d_to_trimesh, cat_meshes
 from dust3r.demo import get_args_parser as dust3r_get_args_parser
+from copy import deepcopy
 
 import matplotlib.pyplot as pl
 
 import torch
+import os.path as path
+HERE_PATH = path.normpath(path.dirname(__file__))  # noqa
 
 from demo_globals import CACHE_PATH, MODEL, DEVICE, SILENT, DATASET_DIR
 
@@ -138,7 +140,7 @@ def get_3D_model_from_scene(scene, scene_state, min_conf_thr=2, as_pointcloud=Fa
     return _convert_scene_output_to_glb(outfile, rgbimg, pts3d, msk, focals, cams2world, as_pointcloud=as_pointcloud,
                                         transparent_cams=transparent_cams, cam_size=cam_size, silent=SILENT)
 
-def save_colmap_scene(scene, save_dir, min_conf_thr=2, clean_depth=False):
+def save_colmap_scene(scene, save_dir, min_conf_thr=2, clean_depth=False, mask_images=True):
     if 'save_pointcloud_with_normals' not in globals():
         sys.path.append(os.path.join(os.path.dirname(__file__), '../wild-gaussian-splatting/gaussian-splatting'))
         sys.path.append(os.path.join(os.path.dirname(__file__), '../wild-gaussian-splatting/src'))
@@ -162,10 +164,7 @@ def save_colmap_scene(scene, save_dir, min_conf_thr=2, clean_depth=False):
     pts3d = [i.detach().reshape(imgs[0].shape) for i in pts3d]
 
     masks = to_numpy([c > min_conf_thr for c in to_numpy(confs)])
-
     # move
-    mask_images = True
-
     save_path, images_path, masks_path, sparse_path = init_filestructure(save_dir)
     save_images_masks(imgs, masks, images_path, masks_path, mask_images)
     save_cameras(focals, principal_points, sparse_path, imgs_shape=imgs.shape)
@@ -173,10 +172,10 @@ def save_colmap_scene(scene, save_dir, min_conf_thr=2, clean_depth=False):
     save_pointcloud_with_normals(imgs, pts3d, masks, sparse_path)
     return save_path
 
-@spaces.GPU(duration=10)
-def get_reconstructed_scene(current_scene_state,
-                            filelist, min_conf_thr, matching_conf_thr,
-                            as_pointcloud, cam_size, shared_intrinsics, **kw):
+@spaces.GPU(duration=30)
+def get_reconstructed_scene(snapshot, current_scene_state,
+                            min_conf_thr, matching_conf_thr,
+                            as_pointcloud, cam_size, shared_intrinsics, clean_depth, filelist, **kw):
     """
     from a list of images, run mast3r inference, sparse global aligner.
     then run get_3D_model_from_scene
@@ -189,11 +188,11 @@ def get_reconstructed_scene(current_scene_state,
         filelist = [filelist[0], filelist[0] + '_2']
 
     lr1 = 0.07
-    niter1 = 500
+    niter1 = 700
     lr2 = 0.014
-    niter2 = 200
-    optim_level = 'refine'
-    mask_sky, clean_depth, transparent_cams = False, True, False
+    niter2 = 300
+    optim_level = 'refine+depth'
+    mask_sky, transparent_cams = False, False
     if len(filelist) < 13:
         scenegraph_type = 'complete'
         winsize = 1
@@ -272,9 +271,8 @@ def mast3r_demo_tab():
     # Title for the MASt3R demo
     gradio.HTML('<h2 style="text-align: center;">MASt3R Demo</h2>')
 
-    # Add instructions for the MASt3R demo
     gradio.HTML('''
-    <div style="padding: 10px; background-color: #e9f7ef; border-radius: 5px; margin-bottom: 10px;">
+    <div style="padding: 10px; border-radius: 5px; margin-bottom: 10px;">
        <h3>Instructions for MASt3R Demo</h3>
        <ul style="text-align: left; color: #333;">
            <li>Upload images. It is recommended to use no more than 10-12 images to avoid exceeding the 3-minute runtime limit for zeroGPU dynamic resources.</li>
@@ -285,7 +283,7 @@ def mast3r_demo_tab():
    ''')
 
    inputfiles = gradio.File(file_count="multiple")
-
+   snapshot = gradio.Image(None, visible=False)
    run_btn = gradio.Button("Run")
 
    with gradio.Row():
@@ -298,14 +296,64 @@ def mast3r_demo_tab():
        as_pointcloud = gradio.Checkbox(value=True, label="As pointcloud")
        shared_intrinsics = gradio.Checkbox(value=False, label="Shared intrinsics",
                                            info="Only optimize one set of intrinsics for all views")
-
+       clean_depth = gradio.Checkbox(value=False, label="Clean depth")
+
    outmodel = gradio.Model3D()
    run_btn.click(
        fn=get_reconstructed_scene,
-       inputs=[scene, inputfiles, min_conf_thr, matching_conf_thr,
-               as_pointcloud, cam_size, shared_intrinsics],
+       inputs=[snapshot, scene, min_conf_thr, matching_conf_thr,
+               as_pointcloud, cam_size, shared_intrinsics, clean_depth, inputfiles],
        outputs=[scene, outmodel]
    )
-
+
+   tower_folder = os.path.join(HERE_PATH, '../wild-gaussian-splatting/mast3r/assets/NLE_tower/')
+   turtle_folder = os.path.join(HERE_PATH, '../wild-gaussian-splatting/data/images/turtle_imgs/')
+   puma_folder = os.path.join(HERE_PATH, '../wild-gaussian-splatting/data/images/puma_imgs/')
+   tower_images = [os.path.join(tower_folder, file) for file in os.listdir(tower_folder) if file.endswith('.jpg') and not file.startswith('2679C386-1DC0-4443-81B5-93D7EDE4AB37-83120-000041DADB2EA917')]  # my code not adapted to different-size input
+   turtle_images = [os.path.join(turtle_folder, file) for file in os.listdir(turtle_folder) if file.endswith('.jpg')]
+   puma_images = [os.path.join(puma_folder, file) for file in os.listdir(puma_folder)[:12] if file.endswith('.jpg')]
+
+
+   examples = gradio.Examples(
+       examples=[
+           [
+               puma_images[0],
+               None,
+               1.5, 0.0, 0.2, True, True, False,
+               puma_images,
+           ]
+       ],
+       inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size, as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
+       outputs=[scene, outmodel],
+       fn=get_reconstructed_scene,
+   )
+   examples = gradio.Examples(
+       examples=[
+           [
+               turtle_images[0],
+               None,
+               1.5, 0.0, 0.2, True, True, False,
+               turtle_images,
+           ]
+       ],
+       inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size, as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
+       outputs=[scene, outmodel],
+       fn=get_reconstructed_scene,
+   )
+   examples = gradio.Examples(
+       examples=[
+           [
+               tower_images[0],
+               None,
+               1.5, 0.0, 0.2, True, True, False,
+               tower_images,
+           ]
+       ],
+       inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size, as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
+       outputs=[scene, outmodel],
+       fn=get_reconstructed_scene,
+   )
+
+
    return demo
 
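The three gradio.Examples blocks added above differ only in which image list they pass, so they could in principle be collapsed into one block with three example rows. A possible consolidation, reusing the component list and values exactly as they appear in the diff (this is a sketch, not part of the commit):

```python
# One Examples block with three rows, one per sample image set.
example_sets = [puma_images, turtle_images, tower_images]
gradio.Examples(
    examples=[
        [imgs[0], None, 1.5, 0.0, 0.2, True, True, False, imgs]
        for imgs in example_sets
    ],
    inputs=[snapshot, scene, min_conf_thr, matching_conf_thr, cam_size,
            as_pointcloud, shared_intrinsics, clean_depth, inputfiles],
    outputs=[scene, outmodel],
    fn=get_reconstructed_scene,
)
```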
 
requirements.txt CHANGED
@@ -12,7 +12,6 @@ scipy
 einops
 trimesh
 
-https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl?download=true
 https://huggingface.co/spaces/ostapagon/mast3r-3dgs/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl?download=true
 https://huggingface.co/spaces/ostapagon/mast3r-3dgs/resolve/main/wheels/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl?download=true
 
 
wheels/simple_knn-0.0.0-cp310-cp310-linux_x86_64.whl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4fca844adde511fb22934188e1311f3d7f43e8a1f22d9c7a21957d5dbd10c2bb
-size 2987149
+oid sha256:03f3f1b4b1e534649efb34ad8f1d8fe2dacf0e11eb9ca83632833e386b884927
+size 3043174