Spaces: Sleeping
Commit · 5cfa18d
Parent(s): 9650ca8
updates
Browse files
- .devcontainer/devcontainer.json +0 -33
- app.py +31 -28
- media/run.txt +0 -0
- ply_run.py +29 -29
- requirements.txt +5 -1
- res/res.txt +0 -0
- res_space/run.txt +0 -0
- sfm.py +24 -11
- space_carving.py +110 -110
.devcontainer/devcontainer.json
DELETED
@@ -1,33 +0,0 @@
-{
-    "name": "Python 3",
-    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
-    "image": "mcr.microsoft.com/devcontainers/python:1-3.11-bullseye",
-    "customizations": {
-        "codespaces": {
-            "openFiles": [
-                "README.md",
-                "app.py"
-            ]
-        },
-        "vscode": {
-            "settings": {},
-            "extensions": [
-                "ms-python.python",
-                "ms-python.vscode-pylance"
-            ]
-        }
-    },
-    "updateContentCommand": "[ -f packages.txt ] && sudo apt update && sudo apt upgrade -y && sudo xargs apt install -y <packages.txt; [ -f requirements.txt ] && pip3 install --user -r requirements.txt; pip3 install --user streamlit; echo '✅ Packages installed and Requirements met'",
-    "postAttachCommand": {
-        "server": "streamlit run app.py --server.enableCORS false --server.enableXsrfProtection false"
-    },
-    "portsAttributes": {
-        "8501": {
-            "label": "Application",
-            "onAutoForward": "openPreview"
-        }
-    },
-    "forwardPorts": [
-        8501
-    ]
-}
app.py
CHANGED
@@ -9,8 +9,6 @@ from PIL import Image
 import matplotlib.pyplot as plt
 import numpy as np
 import plotly.graph_objects as go
-from helpers import *
-
 
 st.set_page_config(page_title="3D Reconstruction Web App", layout="wide")
 st.markdown("<div style='text-align: center;'><h1>3D Scene Reconstruction</h1></div>", unsafe_allow_html=True)
@@ -129,29 +127,33 @@ def show_ply_interactive(ply_path):
     return fig
 
 # Show PLY as image
-def show_ply_as_image(ply_path):
-    (the rest of the removed function body, old lines 133-154, is not rendered in this view)
+# def show_ply_as_image(ply_path):
+#     # Load point cloud
+#     pcd = o3d.io.read_point_cloud(ply_path)
+#     if pcd.is_empty():
+#         raise ValueError("The .ply file is empty or could not be loaded.")
+
+#     # Create visualization window (offscreen)
+#     vis = o3d.visualization.Visualizer()
+#     vis.create_window(visible=False)
+#     vis.add_geometry(pcd)
+
+#     # Set camera view
+#     ctr = vis.get_view_control()
+#     if ctr is None:
+#         raise RuntimeError("Failed to get view control from the visualizer.")
+#     ctr.set_zoom(0.7)
+
+#     vis.poll_events()
+#     vis.update_renderer()
+
+#     # Screenshot to numpy
+#     image = vis.capture_screen_float_buffer(do_render=True)
+#     vis.destroy_window()
+
+#     # Convert to displayable image
+#     img = (np.asarray(image) * 255).astype(np.uint8)
+#     return Image.fromarray(img)
 
 # ---------- Function to extract zip ----------
 def extract_zip(zip_file, extract_to):
@@ -221,7 +223,7 @@ if sfm_zip_file is not None:
     st.success(f"Extracted {zip_name} dataset.")
 
     if st.button("Run SfM Model"):
-        output = run_sfm(sfm_extract_path)
+        output = run_sfm(os.path.join(sfm_extract_path,zip_name))
         st.success("Model ran successfully.")
 
         # Construct PLY path based on zip filename
@@ -229,8 +231,8 @@ if sfm_zip_file is not None:
 
         if os.path.exists(ply_path):
            st.markdown("### 🧩 Reconstructed Point Cloud Image")
-            image = show_ply_as_image(ply_path)
-            st.image(image, caption=f"{zip_name}.ply", use_column_width=True)
+            # image = show_ply_as_image(ply_path)
+            # st.image(image, caption=f"{zip_name}.ply", use_column_width=True)
 
            # Optional download
            with open(ply_path, "rb") as f:
@@ -251,6 +253,7 @@ if sfm_zip_file is not None:
 
 
 
+
 st.header("🧠 Pix2Vox")
 
 uploaded_images = st.file_uploader(f"Upload images", accept_multiple_files=True, type=["png", "jpg", "jpeg"])
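The commit comments out show_ply_as_image in app.py: that helper drives an Open3D Visualizer window, which needs a display/OpenGL context that a hosted Space typically does not have. Below is a minimal, display-free sketch of the same idea using matplotlib's Agg backend (matplotlib and open3d are both listed in requirements.txt); the function name render_ply_preview and the output path are illustrative, not part of this repository.

# Sketch only: a display-free .ply preview that could stand in for the commented-out
# show_ply_as_image. Assumes open3d and matplotlib are installed; names are illustrative.
import numpy as np
import open3d as o3d
import matplotlib
matplotlib.use("Agg")  # draw without any display server
import matplotlib.pyplot as plt

def render_ply_preview(ply_path, out_png="ply_preview.png"):
    pcd = o3d.io.read_point_cloud(ply_path)
    pts = np.asarray(pcd.points)
    colors = np.asarray(pcd.colors) if pcd.has_colors() else None
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111, projection="3d")
    ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=0.5, c=colors)
    ax.set_axis_off()
    fig.savefig(out_png, dpi=150, bbox_inches="tight")
    plt.close(fig)
    return out_png

The saved PNG could be handed to st.image in place of the commented-out call, although the interactive show_ply_interactive view already covers the in-app case.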
media/run.txt
ADDED
File without changes
ply_run.py
CHANGED
@@ -1,29 +1,29 @@
(Every line of the file is marked removed and re-added with identical content; most likely a whitespace or line-ending change. The file after the commit:)

import open3d as o3d
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np

# Show PLY as image
def show_ply_as_image(ply_path):
    # Load point cloud
    pcd = o3d.io.read_point_cloud(ply_path)

    # Create visualization window (offscreen)
    vis = o3d.visualization.Visualizer()
    vis.create_window(visible=False)
    vis.add_geometry(pcd)

    # Set camera view
    ctr = vis.get_view_control()
    ctr.set_zoom(0.7)

    vis.poll_events()
    vis.update_renderer()

    # Screenshot to numpy
    image = vis.capture_screen_float_buffer(do_render=True)
    vis.destroy_window()

    # Convert to displayable image
    img = (np.asarray(image) * 255).astype(np.uint8)
    return Image.fromarray(img)
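ply_run.py keeps the uncommented show_ply_as_image helper. Note that Visualizer.create_window(visible=False) still generally requires an OpenGL context, so this helper tends to work on a desktop machine but not on a headless host, which is consistent with the call being commented out in app.py above. An illustrative usage on a machine with a display (the .ply path below is hypothetical):

# Illustrative usage of ply_run.show_ply_as_image; the path is hypothetical.
from ply_run import show_ply_as_image

preview = show_ply_as_image("res/example.ply")  # returns a PIL.Image
preview.save("example_preview.png")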
requirements.txt
CHANGED
@@ -48,6 +48,11 @@ Werkzeug==3.0.6
 widgetsnbextension==4.0.14
 zipp==3.21.0
 vtk
+opencv-python-headless
+opencv-contrib-python
+streamlit
+matplotlib
+open3d
 opencv-python
 opencv-contrib-python
 streamlit
@@ -57,4 +62,3 @@ argparse
 easydict
 torch
 torchvision
-
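The new block adds opencv-python-headless, matplotlib, and open3d right after vtk, while the earlier opencv-python, opencv-contrib-python, and streamlit entries remain a few lines below, so OpenCV and Streamlit are now listed twice. The headless and GUI OpenCV wheels ship the same cv2 module, and installing both usually means whichever is installed last wins, so it is worth confirming which build is active. A small check (a sketch, not part of the repo):

# Sketch: confirm which OpenCV build ended up installed.
import cv2

print(cv2.__version__)
# Non-headless wheels report a GUI backend (GTK/QT/Cocoa/Win32 UI) in the build info.
print([line for line in cv2.getBuildInformation().splitlines() if "GUI" in line])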
res/res.txt
ADDED
File without changes
res_space/run.txt
ADDED
File without changes
sfm.py
CHANGED
@@ -9,13 +9,13 @@ import matplotlib.pyplot as plt
 class Image_loader():
     def __init__(self, img_dir:str, downscale_factor:float):
         # loading the Camera intrinsic parameters K
-        with open(img_dir … [removed line truncated in this view]
+        with open(os.path.join(img_dir,'K.txt')) as f:
            self.K = np.array(list((map(lambda x:list(map(lambda x:float(x), x.strip().split(' '))),f.read().split('\n')))))
         self.image_list = []
         # Loading the set of images
         for image in sorted(os.listdir(img_dir)):
             if image[-4:].lower() == '.jpg' or image[-5:].lower() == '.png':
-                self.image_list.append(img_dir … [removed line truncated in this view]
+                self.image_list.append(os.path.join(img_dir,image))
 
         self.path = os.getcwd()
         self.factor = downscale_factor
@@ -143,7 +143,16 @@ class Sfm():
         property uchar red
         end_header
         '''
-        [removed line not rendered in this view]
+        # Ensure the 'res' directory exists
+        res_dir = os.path.join(path, 'res')
+        os.makedirs(res_dir, exist_ok=True)
+
+        # Use os.path.basename to extract the folder name in a platform-independent way
+        folder_name = os.path.basename(os.path.dirname(self.img_obj.image_list[0]))
+        ply_file_path = os.path.join(res_dir, folder_name + '.ply')
+
+        # Write the .ply file
+        with open(ply_file_path, 'w') as f:
            f.write(ply_header % dict(vert_num=len(verts)))
            np.savetxt(f, verts, '%f %f %f %d %d %d')
 
@@ -194,7 +203,7 @@ class Sfm():
         return np.float32([key_points_0[m.queryIdx].pt for m in feature]), np.float32([key_points_1[m.trainIdx].pt for m in feature])
 
     def __call__(self, enable_bundle_adjustment:boolean=False):
-        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
+        # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
         pose_array = self.img_obj.K.ravel()
         transform_matrix_0 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])
         transform_matrix_1 = np.empty((3, 4))
@@ -279,24 +288,28 @@ class Sfm():
 
             transform_matrix_0 = np.copy(transform_matrix_1)
             pose_0 = np.copy(pose_1)
-            plt.scatter(i, error)
-            plt.pause(0.05)
+            # plt.scatter(i, error)
+            # plt.pause(0.05)
 
             image_0 = np.copy(image_1)
             image_1 = np.copy(image_2)
             feature_0 = np.copy(features_cur)
             feature_1 = np.copy(features_2)
             pose_1 = np.copy(pose_2)
-            cv2.imshow(self.img_obj.image_list[0].split('\\')[-2], image_2)
-            if cv2.waitKey(1) & 0xff == ord('q'):
-                break
-            cv2.destroyAllWindows()
+            # # cv2.imshow(self.img_obj.image_list[0].split('\\')[-2], image_2)
+            # if cv2.waitKey(1) & 0xff == ord('q'):
+            #     break
+            # cv2.destroyAllWindows()
 
         print("Printing to .ply file")
         print(total_points.shape, total_colors.shape)
         self.to_ply(self.img_obj.path, total_points, total_colors)
         print("Completed Exiting ...")
-
+        output_dir = os.path.join(self.img_obj.path, 'res')
+        os.makedirs(output_dir, exist_ok=True)  # Ensure the 'res' directory exists
+        folder_name = os.path.basename(os.path.dirname(self.img_obj.image_list[0]))
+        output_file = os.path.join(output_dir, f"{folder_name}_pose_array.csv")
+        np.savetxt(output_file, pose_array, delimiter='\n')
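The new lines at the end of Sfm.__call__ save pose_array to res/<folder>_pose_array.csv with delimiter='\n', i.e. one value per line. A hedged sketch of reading it back: since pose_array is initialised from self.img_obj.K.ravel() in the code above, the first nine values are the flattened 3x3 intrinsic matrix; how the remaining values are grouped depends on code not shown in this diff.

# Sketch: reload the pose array written by Sfm.__call__; the file name is illustrative.
import numpy as np

vals = np.loadtxt("res/example_pose_array.csv")  # one float per line
K = vals[:9].reshape(3, 3)   # pose_array starts from self.img_obj.K.ravel()
rest = vals[9:]              # remaining entries; their grouping is not shown in this diff
print(K)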
space_carving.py
CHANGED
@@ -1,111 +1,111 @@
(Every line of the file is marked removed and re-added with identical content; most likely a whitespace or line-ending change. The file after the commit:)

import scipy.io
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import vtk

def run_space_carving():
    data = scipy.io.loadmat("uploads_spacecarving/data/dino_Ps.mat")["P"]
    projections = [data[0, i] for i in range(data.shape[1])]

    # === Load and preprocess images ===
    files = sorted(glob.glob("uploads_spacecarving/data/*.ppm"))
    images = []
    for f in files:
        im = cv2.imread(f, cv2.IMREAD_UNCHANGED).astype(float)
        im /= 255
        images.append(im[:, :, ::-1])  # BGR to RGB

    # === Create silhouettes ===
    imgH, imgW, _ = images[0].shape
    silhouettes = []
    for im in images:
        mask = np.abs(im - [0.0, 0.0, 0.75])
        mask = np.sum(mask, axis=2)
        y, x = np.where(mask <= 1.1)
        im[y, x, :] = [0.0, 0.0, 0.0]
        im = im[:, :, 0]
        im[im > 0] = 1.0
        im = im.astype(np.uint8)
        kernel = np.ones((5, 5), np.uint8)
        im = cv2.morphologyEx(im, cv2.MORPH_OPEN, kernel)
        silhouettes.append(im)

    # === Create voxel grid ===
    s = 120
    x, y, z = np.mgrid[:s, :s, :s]
    pts = np.vstack((x.flatten(), y.flatten(), z.flatten())).astype(float).T
    nb_points_init = pts.shape[0]

    # Normalize and center
    pts[:, 0] /= np.max(pts[:, 0])
    pts[:, 1] /= np.max(pts[:, 1])
    pts[:, 2] /= np.max(pts[:, 2])
    center = np.mean(pts, axis=0)
    pts -= center
    pts /= 5
    pts[:, 2] -= 0.62

    # Homogeneous coordinates
    pts_hom = np.vstack((pts.T, np.ones((1, nb_points_init))))

    # === Voxel carving: count silhouettes where voxel is occupied ===
    filled = []
    for P, sil in zip(projections, silhouettes):
        uvs = P @ pts_hom
        uvs /= uvs[2, :]
        uvs = np.round(uvs).astype(int)
        x_valid = np.logical_and(uvs[0, :] >= 0, uvs[0, :] < imgW)
        y_valid = np.logical_and(uvs[1, :] >= 0, uvs[1, :] < imgH)
        valid = np.logical_and(x_valid, y_valid)
        indices = np.where(valid)[0]
        fill = np.zeros(uvs.shape[1])
        sub_uvs = uvs[:2, indices]
        res = sil[sub_uvs[1, :], sub_uvs[0, :]]
        fill[indices] = res
        filled.append(fill)

    filled = np.vstack(filled)
    occupancy = np.sum(filled, axis=0)

    # === Save voxel grid as .vtr (only the voxels with occupancy > threshold) ===
    threshold = 25
    occupancy_mask = (occupancy > threshold).astype(np.float32)

    # Create grid coordinates
    x_coords = sorted(list(set(np.round(pts[:, 0][::s*s], 6))))
    y_coords = sorted(list(set(np.round(pts[:, 1][:s*s:s], 6))))
    z_coords = sorted(list(set(np.round(pts[:, 2][:s], 6))))

    x_array = vtk.vtkFloatArray()
    y_array = vtk.vtkFloatArray()
    z_array = vtk.vtkFloatArray()

    for val in x_coords:
        x_array.InsertNextValue(val)
    for val in y_coords:
        y_array.InsertNextValue(val)
    for val in z_coords:
        z_array.InsertNextValue(val)

    # Only add occupancy values for retained voxels
    values = vtk.vtkFloatArray()
    values.SetName("Occupancy")
    for i in range(len(occupancy_mask)):
        values.InsertNextValue(occupancy_mask[i])

    # Create rectilinear grid
    rgrid = vtk.vtkRectilinearGrid()
    rgrid.SetDimensions(len(x_coords), len(y_coords), len(z_coords))
    rgrid.SetXCoordinates(x_array)
    rgrid.SetYCoordinates(y_array)
    rgrid.SetZCoordinates(z_array)
    rgrid.GetPointData().SetScalars(values)

    # Save to .vtr
    writer = vtk.vtkXMLRectilinearGridWriter()
    writer.SetFileName("res_space/shape.vtr")
    writer.SetInputData(rgrid)
    writer.Write()
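run_space_carving writes the carved volume as a VTK rectilinear grid to res_space/shape.vtr with a point-data array named "Occupancy". A short sketch (not part of the repo) of reading that file back and counting the voxels kept by the occupancy threshold:

# Sketch: read back res_space/shape.vtr and count the voxels kept by the threshold.
import vtk
from vtk.util.numpy_support import vtk_to_numpy

reader = vtk.vtkXMLRectilinearGridReader()
reader.SetFileName("res_space/shape.vtr")
reader.Update()

grid = reader.GetOutput()
occupancy = vtk_to_numpy(grid.GetPointData().GetScalars())  # the "Occupancy" array
print("grid dimensions:", grid.GetDimensions())
print("occupied voxels:", int(occupancy.sum()))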