joselobenitezg committed
Commit 5f51879 · Parent: 9930f16

set tf32 matmul
Files changed:
- inference/depth.py  +3 -112
- inference/normal.py +3 -96
- inference/pose.py   +0 -82
inference/depth.py
CHANGED
@@ -1,115 +1,3 @@
-# # Example usage
-# import torch
-# import numpy as np
-# from PIL import Image
-# from torchvision import transforms
-# from config import LABELS_TO_IDS
-# from utils.vis_utils import visualize_mask_with_overlay
-
-# import torch
-# import torch.nn.functional as F
-# import numpy as np
-# import cv2
-
-# TASK = 'depth'
-# VERSION = 'sapiens_0.3b'
-
-# model_path = get_model_path(TASK, VERSION)
-# print(model_path)
-
-# model = torch.jit.load(model_path)
-# model.eval()
-# model.to("cuda")
-
-
-# def get_depth(image, depth_model, input_shape=(3, 1024, 768), device="cuda"):
-#     # Preprocess the image
-#     img = preprocess_image(image, input_shape)
-
-#     # Run the model
-#     with torch.no_grad():
-#         result = depth_model(img.to(device))
-
-#     # Post-process the output
-#     depth_map = post_process_depth(result, (image.shape[0], image.shape[1]))
-
-#     # Visualize the depth map
-#     depth_image = visualize_depth(depth_map)
-
-#     return depth_image, depth_map
-
-# def preprocess_image(image, input_shape):
-#     img = cv2.resize(image, (input_shape[2], input_shape[1]), interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
-#     img = torch.from_numpy(img)
-#     img = img[[2, 1, 0], ...].float()
-#     mean = torch.tensor([123.5, 116.5, 103.5]).view(-1, 1, 1)
-#     std = torch.tensor([58.5, 57.0, 57.5]).view(-1, 1, 1)
-#     img = (img - mean) / std
-#     return img.unsqueeze(0)
-
-# def post_process_depth(result, original_shape):
-#     # Check the dimensionality of the result
-#     if result.dim() == 3:
-#         result = result.unsqueeze(0)
-#     elif result.dim() == 4:
-#         pass
-#     else:
-#         raise ValueError(f"Unexpected result dimension: {result.dim()}")
-
-#     # Ensure we're interpolating to the correct dimensions
-#     seg_logits = F.interpolate(result, size=original_shape, mode="bilinear", align_corners=False).squeeze(0)
-#     depth_map = seg_logits.data.float().cpu().numpy()
-
-#     # If depth_map has an extra dimension, squeeze it
-#     if depth_map.ndim == 3 and depth_map.shape[0] == 1:
-#         depth_map = depth_map.squeeze(0)
-
-#     return depth_map
-
-# def visualize_depth(depth_map):
-#     # Normalize the depth map
-#     min_val, max_val = np.nanmin(depth_map), np.nanmax(depth_map)
-#     depth_normalized = 1 - ((depth_map - min_val) / (max_val - min_val))
-
-#     # Convert to uint8
-#     depth_normalized = (depth_normalized * 255).astype(np.uint8)
-
-#     # Apply colormap
-#     depth_colored = cv2.applyColorMap(depth_normalized, cv2.COLORMAP_INFERNO)
-
-#     return depth_colored
-
-# # You can add the surface normal calculation if needed
-# def calculate_surface_normal(depth_map):
-#     kernel_size = 7
-#     grad_x = cv2.Sobel(depth_map.astype(np.float32), cv2.CV_32F, 1, 0, ksize=kernel_size)
-#     grad_y = cv2.Sobel(depth_map.astype(np.float32), cv2.CV_32F, 0, 1, ksize=kernel_size)
-#     z = np.full(grad_x.shape, -1)
-#     normals = np.dstack((-grad_x, -grad_y, z))
-
-#     normals_mag = np.linalg.norm(normals, axis=2, keepdims=True)
-#     with np.errstate(divide="ignore", invalid="ignore"):
-#         normals_normalized = normals / (normals_mag + 1e-5)
-
-#     normals_normalized = np.nan_to_num(normals_normalized, nan=-1, posinf=-1, neginf=-1)
-#     normal_from_depth = ((normals_normalized + 1) / 2 * 255).astype(np.uint8)
-#     normal_from_depth = normal_from_depth[:, :, ::-1]  # RGB to BGR for cv2
-
-#     return normal_from_depth
-
-# from utils.vis_utils import resize_image
-
-# pil_image = Image.open('/home/user/app/assets/image.webp')
-
-# # Load and process an image
-# image = cv2.imread('/home/user/app/assets/frame.png')
-# depth_image, depth_map = get_depth(image, model)
-
-# surface_normal = calculate_surface_normal(depth_map)
-# cv2.imwrite("output_surface_normal.jpg", surface_normal)
-# # Save the results
-# output_im = cv2.imwrite("output_depth_image2.jpg", depth_image)
-
 import torch
 import torch.nn.functional as F
 import numpy as np
@@ -121,6 +9,9 @@ def load_model(task, version):
     try:
         model_path = SAPIENS_LITE_MODELS_PATH[task][version]
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
+            torch.backends.cuda.matmul.allow_tf32 = True
+            torch.backends.cudnn.allow_tf32 = True
         model = torch.jit.load(model_path)
         model.eval()
         model.to(device)
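The three added lines are the substance of the commit: TF32 tensor-core kernels only exist on GPUs with compute capability 8.0 or newer (Ampere onwards), so the guard checks the device's major capability before flipping the two backend flags. A minimal standalone sketch of the same pattern (the helper name enable_tf32_if_supported is ours, not from the repo):

import torch

def enable_tf32_if_supported() -> bool:
    # Enable TF32 for float32 matmuls and cuDNN convolutions when the GPU
    # supports it (compute capability >= 8.0, i.e. Ampere or newer).
    if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        return True
    return False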
inference/normal.py
CHANGED
@@ -1,99 +1,3 @@
-# import torch
-# import torch.nn.functional as F
-# import numpy as np
-# import cv2
-# from PIL import Image
-# from config import SAPIENS_LITE_MODELS_PATH
-
-# # Example usage
-# TASK = 'normal'
-# VERSION = 'sapiens_0.3b'
-
-# model_path = get_model_path(TASK, VERSION)
-# print(model_path)
-
-# model = torch.jit.load(model_path)
-# model.eval()
-# model.to("cuda")
-
-# import torch
-# import torch.nn.functional as F
-# import numpy as np
-# import cv2
-
-# def get_normal(image, normal_model, input_shape=(3, 1024, 768), device="cuda"):
-#     # Preprocess the image
-#     img = preprocess_image(image, input_shape)
-
-#     # Run the model
-#     with torch.no_grad():
-#         result = normal_model(img.to(device))
-
-#     # Post-process the output
-#     normal_map = post_process_normal(result, (image.shape[0], image.shape[1]))
-
-#     # Visualize the normal map
-#     normal_image = visualize_normal(normal_map)
-
-#     return normal_image, normal_map
-
-# def preprocess_image(image, input_shape):
-#     img = cv2.resize(image, (input_shape[2], input_shape[1]), interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
-#     img = torch.from_numpy(img)
-#     img = img[[2, 1, 0], ...].float()
-#     mean = torch.tensor([123.5, 116.5, 103.5]).view(-1, 1, 1)
-#     std = torch.tensor([58.5, 57.0, 57.5]).view(-1, 1, 1)
-#     img = (img - mean) / std
-#     return img.unsqueeze(0)
-
-# def post_process_normal(result, original_shape):
-#     # Check the dimensionality of the result
-#     if result.dim() == 3:
-#         result = result.unsqueeze(0)
-#     elif result.dim() == 4:
-#         pass
-#     else:
-#         raise ValueError(f"Unexpected result dimension: {result.dim()}")
-
-#     # Ensure we're interpolating to the correct dimensions
-#     seg_logits = F.interpolate(result, size=original_shape, mode="bilinear", align_corners=False).squeeze(0)
-#     normal_map = seg_logits.float().cpu().numpy().transpose(1, 2, 0)  # H x W x 3
-#     return normal_map
-
-# def visualize_normal(normal_map):
-#     normal_map_norm = np.linalg.norm(normal_map, axis=-1, keepdims=True)
-#     normal_map_normalized = normal_map / (normal_map_norm + 1e-5)  # Add a small epsilon to avoid division by zero
-
-#     # Convert to 0-255 range and BGR format for visualization
-#     normal_map_vis = ((normal_map_normalized + 1) / 2 * 255).astype(np.uint8)
-#     normal_map_vis = normal_map_vis[:, :, ::-1]  # RGB to BGR
-
-#     return normal_map_vis
-
-# def load_normal_model(checkpoint, use_torchscript=False):
-#     if use_torchscript:
-#         return torch.jit.load(checkpoint)
-#     else:
-#         model = torch.export.load(checkpoint).module()
-#         model = model.to("cuda")
-#         model = torch.compile(model, mode="max-autotune", fullgraph=True)
-#         return model
-
-# import cv2
-# import numpy as np
-
-# # Load the model
-# normal_model = load_normal_model(model_path, use_torchscript='_torchscript')
-
-# # Load the image
-# image = cv2.imread("/home/user/app/assets/image.webp")
-
-# # Get the normal map and visualization
-# normal_image, normal_map = get_normal(image, normal_model)
-
-# # Save the results
-# cv2.imwrite("output_normal_image.png", normal_image)
-
 import torch
 import torch.nn.functional as F
 import numpy as np
@@ -105,6 +9,9 @@ def load_model(task, version):
     try:
         model_path = SAPIENS_LITE_MODELS_PATH[task][version]
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
+            torch.backends.cuda.matmul.allow_tf32 = True
+            torch.backends.cudnn.allow_tf32 = True
         model = torch.jit.load(model_path)
         model.eval()
         model.to(device)
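normal.py receives the identical setup inside its own load_model; the flags are process-global, so either module enables TF32 for the whole session. The speedup trades a little precision: TF32 keeps float32's 8-bit exponent but truncates the mantissa from 23 to 10 bits. A quick sketch (not part of the commit) that makes the difference visible on a supported GPU:

import torch

a = torch.randn(1024, 1024, device="cuda")
b = torch.randn(1024, 1024, device="cuda")

torch.backends.cuda.matmul.allow_tf32 = False
ref = a @ b    # strict float32 matmul

torch.backends.cuda.matmul.allow_tf32 = True
fast = a @ b   # may dispatch to TF32 tensor cores

# Small but nonzero: the TF32 result loses mantissa bits relative to fp32.
print((ref - fast).abs().max())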
inference/pose.py
CHANGED
@@ -1,85 +1,3 @@
-# import torch
-# import numpy as np
-# from PIL import Image
-# from torchvision import transforms
-# from config import LABELS_TO_IDS
-# from utils.vis_utils import visualize_mask_with_overlay
-
-# # Example usage
-# TASK = 'pose'
-# VERSION = 'sapiens_1b'
-
-# model_path = get_model_path(TASK, VERSION)
-# print(model_path)
-
-# model = torch.jit.load(model_path)
-# model.eval()
-# model.to("cuda")
-
-# def get_pose(image, pose_estimator, input_shape=(3, 1024, 768), device="cuda"):
-#     # Preprocess the image
-#     img = preprocess_image(image, input_shape)
-
-#     # Run the model
-#     with torch.no_grad():
-#         heatmap = pose_estimator(img.to(device))
-
-#     # Post-process the output
-#     keypoints, keypoint_scores = udp_decode(heatmap[0].cpu().float().numpy(),
-#                                             input_shape[1:],
-#                                             (input_shape[1] // 4, input_shape[2] // 4))
-
-#     # Scale keypoints to original image size
-#     scale_x = image.width / input_shape[2]
-#     scale_y = image.height / input_shape[1]
-#     keypoints[:, 0] *= scale_x
-#     keypoints[:, 1] *= scale_y
-
-#     # Visualize the keypoints on the original image
-#     pose_image = visualize_keypoints(image, keypoints, keypoint_scores)
-#     return pose_image
-
-# def preprocess_image(image, input_shape):
-#     # Resize and normalize the image
-#     img = image.resize((input_shape[2], input_shape[1]))
-#     img = np.array(img).transpose(2, 0, 1)
-#     img = torch.from_numpy(img).float()
-#     img = img[[2, 1, 0], ...]  # RGB to BGR
-#     mean = torch.tensor([123.675, 116.28, 103.53]).view(3, 1, 1)
-#     std = torch.tensor([58.395, 57.12, 57.375]).view(3, 1, 1)
-#     img = (img - mean) / std
-#     return img.unsqueeze(0)
-
-# def udp_decode(heatmap, img_size, heatmap_size):
-#     # This is a simplified version. You might need to implement the full UDP decode logic
-#     h, w = heatmap_size
-#     keypoints = np.zeros((heatmap.shape[0], 2))
-#     keypoint_scores = np.zeros(heatmap.shape[0])
-
-#     for i in range(heatmap.shape[0]):
-#         hm = heatmap[i]
-#         idx = np.unravel_index(np.argmax(hm), hm.shape)
-#         keypoints[i] = [idx[1] * img_size[1] / w, idx[0] * img_size[0] / h]
-#         keypoint_scores[i] = hm[idx]
-
-#     return keypoints, keypoint_scores
-
-# def visualize_keypoints(image, keypoints, keypoint_scores, threshold=0.3):
-#     draw = ImageDraw.Draw(image)
-#     for (x, y), score in zip(keypoints, keypoint_scores):
-#         if score > threshold:
-#             draw.ellipse([(x-2, y-2), (x+2, y+2)], fill='red', outline='red')
-#     return image
-
-# from utils.vis_utils import resize_image
-# pil_image = Image.open('/home/user/app/assets/image.webp')
-
-# if pil_image.mode == 'RGBA':
-#     pil_image = pil_image.convert('RGB')
-
-# output_pose = get_pose(resized_pil_image, model)
-
-# output_pose
 import torch
 import numpy as np
 from PIL import Image, ImageDraw
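pose.py only sheds its commented-out prototype; it adds no TF32 flags of its own, which is fine given the global effect noted above. For reference, recent PyTorch versions also expose the matmul half of this switch as a single call; a sketch, not something this commit uses:

import torch

# "highest" keeps strict float32 matmuls; "high" and "medium" permit
# reduced-precision float32 matmuls such as TF32. The cuDNN convolution
# flag (torch.backends.cudnn.allow_tf32) is controlled separately.
torch.set_float32_matmul_precision("high")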