improve demo
- app.py +26 -16
- segment_anything/modeling/hourglass_image_encoder.py +32 -4
app.py
CHANGED
@@ -7,21 +7,20 @@ import gradio as gr
 
 from segment_anything import build_sam, SamAutomaticMaskGenerator
 from segment_anything.utils.amg import (
-
-    MaskData,
-    calculate_stability_score,
-    batched_mask_to_box,
-    is_box_near_crop_edge,
+    build_all_layer_point_grids
 )
 
 os.system(r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth')
 
 hourglass_args = {
-    "baseline": {
+    "baseline": {
+        "use_hourglass": False,
+        "hourglass_clustering_location": -1,
+    },
     "1.2x faster": {
         "use_hourglass": True,
-        "hourglass_clustering_location":
-        "hourglass_num_cluster":
+        "hourglass_clustering_location": 16,
+        "hourglass_num_cluster": 81,
     },
     "1.5x faster": {
         "use_hourglass": True,
@@ -30,13 +29,23 @@ hourglass_args = {
     },
 }
 
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+mask_generator = SamAutomaticMaskGenerator(
+    build_sam(checkpoint="sam_vit_h_4b8939.pth", use_hourglass=True),
+)
+mask_generator.predictor.model.to(device=device)
+
 def predict(image, speed_mode, points_per_side):
     points_per_side = int(points_per_side)
-    mask_generator
-
-
-
-
+    mask_generator.predictor.model.image_encoder.load_hourglass_args(**hourglass_args[speed_mode])
+    if points_per_side is not None:
+        mask_generator.point_grids = build_all_layer_point_grids(
+            points_per_side,
+            mask_generator.crop_n_layers,
+            mask_generator.crop_n_points_downscale_factor,
+        )
+    mask_generator.points_per_batch = 64 if points_per_side > 12 else points_per_side * points_per_side
+
     start = time.perf_counter()
     with torch.no_grad():
         masks = mask_generator.generate(image)
@@ -52,13 +61,14 @@ def predict(image, speed_mode, points_per_side):
         color_mask = np.random.random((1, 1, 3))
         img = img * (1 - m[..., None]) + color_mask * m[..., None]
 
-    image = (
+    image = (image * 0.65 + img * 255 * 0.35).astype(np.uint8)
     return image, eta_text
 
 description = """
 # <center>Expedit-SAM (Expedite Segment Anything Model without any training)</center>
 Github link: [Link](https://github.com/Expedit-LargeScale-Vision-Transformer/Expedit-SAM)
 You can select the speed mode you want to use from the "Speed Mode" dropdown menu and click "Run" to segment the image you uploaded to the "Input Image" box.
+Points per side is a hyper-parameter that controls the number of points used to generate the segmentation masks. The higher the number, the more accurate the segmentation masks will be, but the slower the inference speed will be. The default value is 12.
 """
 if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
     description += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
@@ -84,8 +94,8 @@ def main():
                 multiselect=False,
             )
             with gr.Row():
-                run_btn = gr.Button(label="Run",
-                clear_btn = gr.Button(label="Clear",
+                run_btn = gr.Button(label="Run", value="Run")
+                clear_btn = gr.Button(label="Clear", value="Clear")
         with gr.Column():
            output_image = gr.Image(label="Output Image")
            eta_label = gr.Label(label="ETA")
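With these changes the demo builds SAM once at start-up and, for each request, only swaps the hourglass settings and the prompt-point grid before calling the generator. Below is a minimal sketch of that inference path, assuming the Expedit-SAM fork of `segment_anything` (whose `build_sam` accepts `use_hourglass` and whose image encoder exposes `load_hourglass_args`, as in the diff above); the `segment` helper, the `speed_presets` name, and its defaults are illustrative, not part of the commit.

```python
# Sketch of the updated demo's inference path (assumes the Expedit-SAM fork of
# segment_anything: build_sam takes use_hourglass and the image encoder
# exposes load_hourglass_args, as shown in the diff above).
import numpy as np
import torch
from segment_anything import build_sam, SamAutomaticMaskGenerator
from segment_anything.utils.amg import build_all_layer_point_grids

# Build the model once at module level instead of once per request.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mask_generator = SamAutomaticMaskGenerator(
    build_sam(checkpoint="sam_vit_h_4b8939.pth", use_hourglass=True),
)
mask_generator.predictor.model.to(device=device)

speed_presets = {  # mirrors hourglass_args in app.py (illustrative subset)
    "baseline": {"use_hourglass": False, "hourglass_clustering_location": -1},
    "1.2x faster": {"use_hourglass": True,
                    "hourglass_clustering_location": 16,
                    "hourglass_num_cluster": 81},
}

def segment(image: np.ndarray, speed_mode: str = "1.2x faster", points_per_side: int = 12):
    """Reconfigure the encoder in place, then run automatic mask generation."""
    # Swap the clustering settings without reloading any weights.
    mask_generator.predictor.model.image_encoder.load_hourglass_args(
        **speed_presets[speed_mode]
    )
    # Fewer prompt points means fewer proposed masks and faster inference.
    mask_generator.point_grids = build_all_layer_point_grids(
        points_per_side,
        mask_generator.crop_n_layers,
        mask_generator.crop_n_points_downscale_factor,
    )
    mask_generator.points_per_batch = (
        64 if points_per_side > 12 else points_per_side * points_per_side
    )
    with torch.no_grad():
        # Returns a list of dicts with "segmentation", "area", etc.
        return mask_generator.generate(image)
```

The Space then colors each returned mask randomly and blends the overlay with the input at roughly 65%/35% opacity via `image = (image * 0.65 + img * 255 * 0.35).astype(np.uint8)`.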
segment_anything/modeling/hourglass_image_encoder.py
CHANGED
@@ -25,7 +25,7 @@ from .image_encoder import (
 
 
 class TokenClusteringBlock(nn.Module):
-    def __init__(self, num_spixels=None, n_iters=5, temperture=0.
+    def __init__(self, num_spixels=None, n_iters=5, temperture=0.01, window_size=5):
         super().__init__()
         if isinstance(num_spixels, tuple):
             assert len(num_spixels) == 2
@@ -182,7 +182,7 @@ class NaiveUnpooling(UnpoolingBase):
 
 
 class TokenReconstructionBlock(UnpoolingBase):
-    def __init__(self, k=
+    def __init__(self, k=20, temperture=0.01):
         super().__init__()
 
         self.k = k
@@ -232,7 +232,7 @@ class HourglassImageEncoderViT(ImageEncoderViT):
         window_size: int = 0,
         global_attn_indexes: Tuple[int, ...] = (),
         hourglass_clustering_location: int = -1,
-        hourglass_num_cluster: int =
+        hourglass_num_cluster: int = 100,
         hourglass_cluster_iters: int = 5,
         hourglass_temperture: float = 0.01,
         hourglass_cluster_window_size: int = 5,
@@ -275,6 +275,8 @@ class HourglassImageEncoderViT(ImageEncoderViT):
             global_attn_indexes=global_attn_indexes,
         )
 
+        hourglass_clustering_location = hourglass_clustering_location if hourglass_clustering_location >= 0 else depth + 1
+
         self.window_size = window_size
         self.ws_new = int(math.sqrt(hourglass_num_cluster))
 
@@ -356,12 +358,38 @@ class HourglassImageEncoderViT(ImageEncoderViT):
                 x, pad_hw = self.cluster(x, reconstructer)
             x = blk(x)
 
-        x
+        if x.shape[1] != H or x.shape[2] != W:
+            x = self.reconstruct(x, H, W, reconstructer, pad_hw)
 
         x = self.neck(x.permute(0, 3, 1, 2))
 
         return x
 
+    def load_hourglass_args(self, **hourglass_args):
+        hourglass_clustering_location = hourglass_args.get('hourglass_clustering_location', self.clustering_location)
+        hourglass_num_cluster = hourglass_args.get('hourglass_num_cluster', self.token_clustering_block.num_spixels[0] * self.token_clustering_block.num_spixels[1])
+        hourglass_cluster_iters = hourglass_args.get('hourglass_cluster_iters', self.token_clustering_block.n_iters)
+        hourglass_temperture = hourglass_args.get('hourglass_temperture', self.token_clustering_block.temperture)
+        hourglass_cluster_window_size = hourglass_args.get('hourglass_cluster_window_size', self.token_clustering_block.r * 2 + 1)
+        hourglass_reconstruction_k = hourglass_args.get('hourglass_reconstruction_k', self.token_reconstruction_block.k)
+
+        self.clustering_location = hourglass_clustering_location if hourglass_clustering_location >= 0 else len(self.blocks) + 1
+
+        self.ws_new = int(math.sqrt(hourglass_num_cluster))
+        for i, blk in enumerate(self.blocks):
+            blk.window_size = (self.window_size if i < self.clustering_location else self.ws_new) if blk.window_size != 0 else 0
+
+        self.token_clustering_block = TokenClusteringBlock(
+            num_spixels=hourglass_num_cluster,
+            n_iters=hourglass_cluster_iters,
+            temperture=hourglass_temperture,
+            window_size=hourglass_cluster_window_size,
+        )
+        self.token_reconstruction_block = TokenReconstructionBlock(
+            k=hourglass_reconstruction_k,
+            temperture=hourglass_temperture,
+        )
+
 
 class HourglassBlock(Block):
     """Transformer blocks with support of window attention and residual propagation blocks"""
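The new `load_hourglass_args` method makes the clustering configuration switchable at inference time: it maps a negative clustering location past the last block (so clustering never runs), recomputes the post-clustering window size as the square root of the cluster count, retargets each windowed block accordingly, and rebuilds the clustering and reconstruction blocks. Below is a small self-contained sketch of that bookkeeping, assuming SAM ViT-H's 32 blocks and base window size 14; `preview_hourglass_config` is a hypothetical helper, not part of the repo, and it ignores global-attention blocks, whose window size stays 0 in the real method.

```python
import math

def preview_hourglass_config(num_blocks: int,
                             clustering_location: int,
                             num_cluster: int,
                             base_window_size: int = 14) -> dict:
    """Mirror the bookkeeping in load_hourglass_args without touching a model."""
    # A negative location is pushed past the last block, so clustering never runs.
    location = clustering_location if clustering_location >= 0 else num_blocks + 1
    # After clustering, windowed blocks use a window matching the cluster grid.
    ws_new = int(math.sqrt(num_cluster))
    window_sizes = [base_window_size if i < location else ws_new
                    for i in range(num_blocks)]
    return {"clustering_location": location,
            "ws_new": ws_new,
            "window_sizes": window_sizes}

# "1.2x faster" preset from app.py: cluster after block 16 into 81 tokens (9x9 grid).
print(preview_hourglass_config(num_blocks=32, clustering_location=16, num_cluster=81))
# "baseline" preset: location -1 maps to 33, i.e. clustering is effectively disabled.
print(preview_hourglass_config(num_blocks=32, clustering_location=-1, num_cluster=81))
```

Because the method replaces `self.token_clustering_block` and `self.token_reconstruction_block` outright, the new settings take effect on the next forward pass without reloading weights.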