update version
- app.py            +11 -12
- demo.py           +94 -0
- requirements.txt  +1 -1
app.py
CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
-
-from metaseg import SegAutoMaskGenerator
+from metaseg import SegAutoMaskPredictor


 def image_app():
@@ -37,18 +36,24 @@ def image_app():
                     label="Points per Batch",
                 )

+                seg_automask_image_min_area = gr.Number(
+                    value=0,
+                    label="Min Area",
+                )
+
                 seg_automask_image_predict = gr.Button(value="Generator")

             with gr.Column():
                 output_image = gr.Image()

         seg_automask_image_predict.click(
-            fn=
+            fn=SegAutoMaskPredictor().image_predict,
             inputs=[
                 seg_automask_image_file,
                 seg_automask_image_model_type,
                 seg_automask_image_points_per_side,
                 seg_automask_image_points_per_batch,
+                seg_automask_image_min_area,
             ],
             outputs=[output_image],
         )
@@ -93,24 +98,18 @@ def video_app():
                     label="Min Area",
                 )

-                seg_automask_video_max_area = gr.Number(
-                    value=10000,
-                    label="Max Area",
-                )
-
                 seg_automask_video_predict = gr.Button(value="Generator")
             with gr.Column():
                 output_video = gr.Video()

         seg_automask_video_predict.click(
-            fn=
+            fn=SegAutoMaskPredictor().video_predict,
             inputs=[
                 seg_automask_video_file,
                 seg_automask_video_model_type,
                 seg_automask_video_points_per_side,
                 seg_automask_video_points_per_batch,
                 seg_automask_video_min_area,
-                seg_automask_video_max_area,
             ],
             outputs=[output_video],
         )
@@ -127,7 +126,7 @@ def metaseg_app():
             <a href='https://twitter.com/kadirnar_ai' target='_blank'>Twitter</a> | <a href='https://github.com/kadirnar' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/kadir-nar/' target='_blank'>Linkedin</a> |
             </h5>
             """
-            )
+        )

     with gr.Row():
         with gr.Column():
@@ -136,7 +135,7 @@ def metaseg_app():
             with gr.Tab("Video"):
                 video_app()

-    app.queue(concurrency_count=
+    app.queue(concurrency_count=1)
     app.launch(debug=True, enable_queue=True)


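For reference, a minimal standalone sketch of the same wiring introduced above. It assumes the metaseg 0.5.x argument order shown in demo.py below (source, model_type, points_per_side, points_per_batch, min_area); the component choices and ranges here are illustrative and not taken from app.py:

    import gradio as gr
    from metaseg import SegAutoMaskPredictor

    # Gradio passes the listed input components to fn positionally, so the bound
    # predictor method can be used directly as the click handler.
    with gr.Blocks() as sketch:
        image = gr.Image(type="filepath")  # source
        model_type = gr.Dropdown(["vit_b", "vit_l", "vit_h"], value="vit_b")
        points_per_side = gr.Slider(4, 64, value=16, step=4)      # illustrative range
        points_per_batch = gr.Slider(16, 128, value=64, step=16)  # illustrative range
        min_area = gr.Number(value=0)
        output_image = gr.Image()
        gr.Button("Generator").click(
            fn=SegAutoMaskPredictor().image_predict,
            inputs=[image, model_type, points_per_side, points_per_batch, min_area],
            outputs=[output_image],
        )

    sketch.queue(concurrency_count=1)
    sketch.launch()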
demo.py
ADDED
@@ -0,0 +1,94 @@
+from metaseg import SegAutoMaskPredictor, SegManualMaskPredictor, SahiAutoSegmentation, sahi_sliced_predict
+
+# For image
+
+
+def image_app(image_path, model_type, points_per_side, points_per_batch, min_area):
+    SegAutoMaskPredictor().image_predict(
+        source=image_path,
+        model_type=model_type,  # vit_l, vit_h, vit_b
+        points_per_side=points_per_side,
+        points_per_batch=points_per_batch,
+        min_area=min_area,
+        output_path="output.png",
+        show=False,
+        save=True,
+    )
+    return "output.png"
+
+
+# For video
+
+
+def video_app(video_path, model_type, points_per_side, points_per_batch, min_area):
+    SegAutoMaskPredictor().video_predict(
+        source=video_path,
+        model_type=model_type,  # vit_l, vit_h, vit_b
+        points_per_side=points_per_side,
+        points_per_batch=points_per_batch,
+        min_area=min_area,
+        output_path="output.mp4",
+        show=False,
+        save=True,
+    )
+    return "output.mp4"
+
+
+# For manual box and point selection
+
+
+def manual_app(image_path, model_type, input_point, input_label, input_box, multimask_output, random_color):
+    SegManualMaskPredictor().image_predict(
+        source=image_path,
+        model_type=model_type,  # vit_l, vit_h, vit_b
+        input_point=input_point,
+        input_label=input_label,
+        input_box=input_box,
+        multimask_output=multimask_output,
+        random_color=random_color,
+        output_path="output.png",
+        show=False,
+        save=True,
+    )
+    return "output.png"
+
+
+# For sahi sliced prediction
+
+from metaseg import SahiAutoSegmentation, sahi_sliced_predict
+
+
+def sahi_app(
+    image_path,
+    detection_model_type,
+    detection_model_path,
+    conf_th,
+    image_size,
+    slice_height,
+    slice_width,
+    overlap_height_ratio,
+    overlap_width_ratio,
+):
+    boxes = sahi_sliced_predict(
+        image_path=image_path,
+        detection_model_type=detection_model_type,  # yolov8, detectron2, mmdetection, torchvision
+        detection_model_path=detection_model_path,
+        conf_th=conf_th,
+        image_size=image_size,
+        slice_height=slice_height,
+        slice_width=slice_width,
+        overlap_height_ratio=overlap_height_ratio,
+        overlap_width_ratio=overlap_width_ratio,
+    )
+
+    autoseg = SahiAutoSegmentation().predict(
+        source=image_path,
+        model_type="vit_b",
+        input_box=boxes,
+        multimask_output=False,
+        random_color=False,
+        show=False,
+        save=True,
+    )
+
+    return "output.png"
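A hypothetical invocation of the helpers above; the file names and parameter values are placeholders and not part of the commit:

    # Placeholder paths and values, for illustration only.
    image_app("input.jpg", "vit_b", points_per_side=16, points_per_batch=64, min_area=0)
    video_app("input.mp4", "vit_b", points_per_side=16, points_per_batch=64, min_area=1000)

Each call writes output.png / output.mp4 to the working directory and returns that file name, matching the output_path arguments in the functions above.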
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-metaseg==0.
+metaseg==0.5.2

 # code formatting
 black==21.7b0
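For running the app outside the Space, the same pin can be installed directly; a sketch, noting that gradio itself is not listed in requirements.txt and would need to be installed separately:

    pip install metaseg==0.5.2
    pip install gradio  # not pinned in requirements.txt; needed to run app.py locally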