Commit: Add description and title again
app.py (CHANGED)
@@ -178,9 +178,11 @@ examples=[
 
 
 
-
-
-
+title = "dacl-challenge @ WACV2024"
+description = """
+<p style="text-align:center">
+<h1>dacl-challenge @ WACV2024</h1>
+</p>
 <b>
 <p style="text-align:center">
 <a href='https://twitter.com/dacl_ai' target='_blank'>Twitter</a><a href='https://x.com/dacl_ai' target='_blank'>/X</a> |
@@ -214,11 +216,25 @@ with gr.Blocks() as app:
 <li>Model: <a href='https://huggingface.co/nvidia/mit-b1' target='_blank'>SegFormer mit-b1</a>, trained on resized 512x512 images for (only) 10 epochs.</li>
 <li>Label description of dacl10k dataset: "A.3. Class descriptions" in <a href='https://arxiv.org/pdf/2309.00460.pdf' target='_blank'>J. Flotzinger, P.J. Rösch, T. Braml: "dacl10k: Benchmark for Semantic Bridge Damage Segmentation".</a></li>
 </ul>
+<p></p>
+
 
+<p>Workflow:
+<ul>
+<li>Upload an image or select one from "Examples".</li>
+<li>Then click "1) Generate Masks"</li>
+<li>Select a damage or object type in "Select Label" and choose an "Alpha Factor" for transparency.</li>
+<li>Then click "2) Generate Transparent Mask (with Alpha Factor)"</li>
+</ul>
 """
 
-
+article = "<p style='text-align: center'><a href='https://github.com/phiyodr/dacl10k-toolkit' target='_blank'>Github Repo</a></p>"
+
+
 
+with gr.Blocks() as app:
+    with gr.Row():
+        gr.Markdown(description)
     with gr.Row():
         input_img = gr.inputs.Image(type="pil", label="Original Image")
         gr.Examples(examples=examples, inputs=[input_img])
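The hunks above restore the `title` and `description` strings and render the description with `gr.Markdown` inside the `gr.Blocks` layout; the surrounding context lines still use the legacy `gr.inputs.Image` / `gr.outputs.Image` namespaces, which Gradio deprecated in favour of plain `gr.Image` from version 3.0 onwards. Below is a minimal sketch of the same header and input row written against the current API; the empty `examples` placeholder and the commented-out lines are illustrative, not part of the commit.

```python
# Sketch only, not the Space's full app.py: the restored description header and
# the input row from the diff, using the current gr.Image component instead of
# the deprecated gr.inputs.Image namespace.
import gradio as gr

examples = []  # placeholder; app.py defines the real list of example images

with gr.Blocks() as app:
    with gr.Row():
        gr.Markdown("<h1>dacl-challenge @ WACV2024</h1>")  # restored description header
    with gr.Row():
        input_img = gr.Image(type="pil", label="Original Image")
        # gr.Examples(examples=examples, inputs=[input_img])  # as in the diff, once `examples` holds image paths

# app.launch()  # uncomment to serve locally
```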
@@ -226,8 +242,8 @@ with gr.Blocks() as app:
         img = gr.outputs.Image(type="pil", label="All Masks")
         transparent_img = gr.outputs.Image(type="pil", label="Transparent Image")
     with gr.Row():
-
-
+        dropdown = gr.Dropdown(choices=target_list_all, label="Select Label", value="All")
+        slider = gr.Slider(minimum=0, maximum=1, value=0.4, label="Alpha Factor")
 
         all_masks = gr.Gallery(visible=False)
         background = gr.Image(visible=False)
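The last hunk adds the "Select Label" dropdown and the "Alpha Factor" slider. The callback that turns the slider value into a transparent overlay is not part of this diff; the sketch below shows one plausible way to blend a selected mask over the original image with PIL, where `make_transparent`, `generate_btn` and `selected_mask` are hypothetical names rather than code from app.py.

```python
# Sketch only: applying the "Alpha Factor" slider value as a PIL alpha blend.
from PIL import Image

def make_transparent(original: Image.Image, mask: Image.Image, alpha: float) -> Image.Image:
    """Blend the mask over the original; alpha=0 shows only the original, alpha=1 only the mask."""
    original = original.convert("RGBA")
    mask = mask.convert("RGBA").resize(original.size)  # match mode and size for Image.blend
    return Image.blend(original, mask, alpha)

# Hypothetical wiring inside the Blocks app (only `slider`, `input_img` and
# `transparent_img` appear in the diff; `generate_btn` and `selected_mask` do not):
# generate_btn.click(make_transparent,
#                    inputs=[input_img, selected_mask, slider],
#                    outputs=[transparent_img])
```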