commit

- app.py +14 -8
- requirements.txt +7 -0
app.py CHANGED

@@ -4,6 +4,8 @@ from gradio_client import Client, handle_file
 from pathlib import Path
 from gradio.utils import get_cache_folder
 
+import torch
+
 from PIL import Image
 
 class Examples(gr.helpers.Examples):
@@ -63,6 +65,7 @@ def process_image_4(image_path, prompt):
 
     inputs = []
     for p in prompt:
+        cur_p = map_prompt[p]
         image = Image.open(image_path)
 
         w, h = image.size
@@ -78,7 +81,7 @@ def process_image_4(image_path, prompt):
             'input_images': image.unsqueeze(0),
             'original_size': torch.tensor([[w,h]]),
             'target_size': torch.tensor([[768, 768]]),
-            'prompt': [
+            'prompt': [cur_p],
             'coor_point': coor_point,
             'point_labels': point_labels,
             'generator': generator
@@ -88,11 +91,13 @@ def process_image_4(image_path, prompt):
     return inputs
 
 
-def
+def inf(image_path, prompt):
+    print(image_path)
+    print(prompt)
     inputs = process_image_4(image_path, prompt)
-    return None
+    # return None
     return client.predict(
-
+        data=inputs,
         api_name="/inf"
     )
 
@@ -154,9 +159,10 @@ def run_demo_server():
             queue=False,
         ).success(
             # fn=process_pipe_matting,
-            fn=
+            fn=inf,
             inputs=[
                 matting_image_input,
+                checkbox_group
             ],
             outputs=[matting_image_output],
             concurrency_limit=1,
@@ -176,11 +182,11 @@ def run_demo_server():
         )
 
         gr.Examples(
-            fn=
+            fn=inf,
             examples=[
-                "assets/person.jpg",
+                ["assets/person.jpg", ['depth', 'normal', 'entity', 'pose']]
             ],
-            inputs=[matting_image_input],
+            inputs=[matting_image_input, checkbox_group],
             outputs=[matting_image_output],
             cache_examples=True,
             # cache_examples=False,
requirements.txt ADDED

@@ -0,0 +1,7 @@
+accelerate
+diffusers
+invisible_watermark
+torch
+transformers
+xformers
+sentencepiece
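
One caveat on the new requirements.txt: none of the entries are version-pinned, so every rebuild of the Space resolves the latest releases, and a breaking release of any dependency can break the build. Pinning exact versions (name==X.Y.Z) once a working combination is known would make rebuilds reproducible.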