Update app.py
app.py CHANGED
```diff
@@ -80,7 +80,6 @@ def FlowEditRun(
     n_min: int,
     n_avg: int,
     seed: int,
-
 ):
 
     if not len(src_prompt):
```
@@ -183,18 +182,13 @@ intro = """
|
|
183 |
<a href="https://matankleiner.github.io/flowedit/">[Project Page]</a> |
|
184 |
<a href="https://github.com/fallenshock/FlowEdit">[Code]</a>
|
185 |
</h3>
|
186 |
-
|
187 |
-
|
188 |
-
<br>
|
189 |
-
<br>Edit your image using Flow models! upload an image, add a description of it, and specify the edits you want to make.
|
190 |
<h3>Notes:</h3>
|
191 |
-
|
192 |
<ol>
|
193 |
<li>We use FLUX.1 dev and SD3 for the demo. The models are large and may take a while to load.</li>
|
194 |
<li>We recommend 1024x1024 images for the best results. If the input images are too large, there may be out-of-memory errors.</li>
|
195 |
<li>Default hyperparameters for each model used in the paper are provided as examples. Feel free to experiment with them as well.</li>
|
196 |
</ol>
|
197 |
-
|
198 |
"""
|
199 |
|
200 |
# article = """
|
@@ -210,43 +204,44 @@ intro = """
|
|
210 |
# ```
|
211 |
# """
|
212 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
213 |
|
214 |
-
with gr.
|
215 |
-
|
216 |
|
217 |
-
|
218 |
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
with gr.Row(equal_height=True):
|
223 |
-
image_src = gr.Image(type="filepath", label="Source Image", value="inputs/cat.png",)
|
224 |
-
image_tar = gr.Image(label="Output", type="pil", show_label=True, format="png",),
|
225 |
-
|
226 |
-
with gr.Row():
|
227 |
-
src_prompt = gr.Textbox(lines=2, label="Source Prompt", value="a cat sitting in the grass")
|
228 |
-
|
229 |
-
with gr.Row():
|
230 |
-
tar_prompt = gr.Textbox(lines=2, label="Target Prompt", value="a puppy sitting in the grass")
|
231 |
-
|
232 |
-
with gr.Row():
|
233 |
-
model_type = gr.Dropdown(["SD3", "FLUX"], label="Model Type", value="SD3")
|
234 |
-
T_steps = gr.Number(value=50, label="Total Steps", minimum=1, maximum=50)
|
235 |
-
n_max = gr.Number(value=33, label="n_max (control the strength of the edit)")
|
236 |
-
|
237 |
-
with gr.Row():
|
238 |
-
src_guidance_scale = gr.Slider(minimum=1.0, maximum=30.0, value=3.5, label="src_guidance_scale")
|
239 |
-
tar_guidance_scale = gr.Slider(minimum=1.0, maximum=30.0, value=13.5, label="tar_guidance_scale")
|
240 |
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
|
249 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
250 |
|
251 |
|
252 |
|
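The hunk above replaces the old stacked rows with a two-column layout inside a width-capped container: page-level CSS targets the #col-container id, the left column holds the source image, prompts, run button, and hyperparameter sliders, and the right column holds the output image plus an Advanced Settings accordion. Below is a minimal, self-contained sketch of the same pattern; the component names ("src", "prompt", "run", "out") are illustrative, not the ones in app.py.

```python
import gradio as gr

# Sketch of the width-capped two-column Blocks layout used in the diff above.
css = """
#col-container {
    margin: 0 auto;
    max-width: 960px;
}
"""

with gr.Blocks(css=css) as demo:
    # elem_id ties this Column to the #col-container rule, so the whole
    # app is centered and capped at 960px wide.
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column():  # left: inputs
                src = gr.Image(type="filepath", label="Source Image")
                prompt = gr.Textbox(lines=2, label="Prompt")
                run = gr.Button("Run", variant="primary")
            with gr.Column():  # right: output and advanced settings
                out = gr.Image(type="pil", label="Output")
                with gr.Accordion(label="Advanced Settings", open=False):
                    seed = gr.Number(value=42, label="seed")

    # run.click(...) would wire the button to an inference function here.

demo.queue()
demo.launch()
```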
```diff
@@ -267,7 +262,7 @@ with gr.Blocks() as demo:
             seed,
         ],
         outputs=[
-            image_tar
+            image_tar,
         ],
     )
 
```
```diff
@@ -275,8 +270,8 @@ with gr.Blocks() as demo:
     gr.Examples(
         label="Examples",
         examples=get_examples(),
-        inputs=[image_src, model_type, T_steps, src_guidance_scale, tar_guidance_scale, n_max, src_prompt, tar_prompt, n_min, n_avg, seed, image_tar
-        outputs=[image_tar
+        inputs=[image_src, model_type, T_steps, src_guidance_scale, tar_guidance_scale, n_max, src_prompt, tar_prompt, n_min, n_avg, seed, image_tar],
+        outputs=[image_tar],
     )
 
     model_type.input(fn=on_model_change, inputs=[model_type], outputs=[T_steps, src_guidance_scale, tar_guidance_scale, n_max])
```
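The model_type.input(...) call at the end of this hunk registers a listener so that switching the Dropdown between SD3 and FLUX pushes new defaults into the four hyperparameter widgets. The on_model_change function itself is not shown in this diff; the sketch below is one plausible shape for it, and the FLUX default values in it are assumptions, not taken from app.py.

```python
import gradio as gr

# Hedged sketch of an on_model_change handler for the listener above.
def on_model_change(model_type: str):
    if model_type == "FLUX":
        t_steps, src_cfg, tar_cfg, n_max = 28, 1.5, 5.5, 24  # assumed FLUX defaults
    else:  # "SD3", matching the initial widget values shown in the diff
        t_steps, src_cfg, tar_cfg, n_max = 50, 3.5, 13.5, 33
    # One gr.update per output component, in the order given to outputs=[...]
    return (
        gr.update(value=t_steps),
        gr.update(value=src_cfg),
        gr.update(value=tar_cfg),
        gr.update(value=n_max),
    )
```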
```diff
@@ -284,4 +279,4 @@ with gr.Blocks() as demo:
 
 # gr.HTML(article)
 demo.queue()
-demo.launch( )
+demo.launch( )
```
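The final hunk only rewrites the launch line (a whitespace-level change in the diff); demo.queue() stays in place, which routes submissions through Gradio's request queue so long-running GPU edits run in order rather than all at once. If the Space ever needed to bound its backlog, the queue call also accepts a max_size argument; the value below is illustrative, not from app.py.

```python
demo.queue(max_size=20)  # stop accepting new requests once 20 are waiting
demo.launch()
```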