SkalskiP committed
Commit b887a7c · 1 Parent(s): 4f5bd18

test examples

Files changed (1)
  1. app.py +63 -3
app.py CHANGED
@@ -4,6 +4,7 @@ from typing import Tuple, Optional
 
 import gradio as gr
 import numpy as np
+import requests
 import spaces
 import torch
 from PIL import Image, ImageFilter
@@ -23,6 +24,43 @@ IMAGE_SIZE = 1024
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 PIPE = FluxInpaintPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
+CLIENT = Client("SkalskiP/florence-sam-masking")
+
+
+EXAMPLES = [
+    [
+        None,
+        {
+            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+            "layers": [Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2-removebg.png", stream=True).raw)],
+            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
+        },
+        "little lion",
+        None,
+        5,
+        5,
+        42,
+        False,
+        0.85,
+        30
+    ],
+    [
+        None,
+        {
+            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
+            "layers": [Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-3-removebg.png", stream=True).raw)],
+            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-3.png", stream=True).raw),
+        },
+        "tribal tattoos",
+        None,
+        5,
+        5,
+        42,
+        False,
+        0.85,
+        30
+    ]
+]
 
 
 def calculate_image_dimensions_for_flux(
@@ -84,7 +122,7 @@ def set_client_for_session(request: gr.Request):
     return Client("SkalskiP/florence-sam-masking", headers={"X-IP-Token": x_ip_token})
 
 
-@spaces.GPU(duration=100)
+@spaces.GPU(duration=50)
 def run_flux(
     image: Image.Image,
     mask: Image.Image,
@@ -187,7 +225,7 @@ with gr.Blocks() as demo:
 
             with gr.Row():
                 inpainting_prompt_text_component = gr.Text(
-                    label="Prompt",
+                    label="Inpainting prompt",
                     show_label=False,
                     max_lines=1,
                     placeholder="Enter text to generate inpainting",
@@ -198,7 +236,7 @@ with gr.Blocks() as demo:
 
            with gr.Accordion("Advanced Settings", open=False):
                masking_prompt_text_component = gr.Text(
-                    label="Prompt",
+                    label="Masking prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter text to generate masking",
@@ -264,6 +302,28 @@ with gr.Blocks() as demo:
            with gr.Accordion("Debug", open=False):
                output_mask_component = gr.Image(
                    type='pil', image_mode='RGB', label='Input mask', format="png")
+    gr.Examples(
+        fn=process,
+        examples=EXAMPLES,
+        inputs=[
+            client_component,
+            input_image_editor_component,
+            inpainting_prompt_text_component,
+            masking_prompt_text_component,
+            mask_inflation_slider_component,
+            mask_blur_slider_component,
+            seed_slicer_component,
+            randomize_seed_checkbox_component,
+            strength_slider_component,
+            num_inference_steps_slider_component
+        ],
+        outputs=[
+            output_image_component,
+            output_mask_component
+        ],
+        run_on_click=True,
+        # cache_examples=True
+    )
 
    submit_button_component.click(
        fn=process,
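
The EXAMPLES rows decode their images from the Roboflow CDN at import time via Image.open(requests.get(..., stream=True).raw). A minimal sketch of that download pattern with error handling added; the helper name load_image_from_url is only an illustration, not part of this commit:

import requests
from PIL import Image


def load_image_from_url(url: str) -> Image.Image:
    # hypothetical helper: fetch an example asset once and decode it with PIL;
    # raise_for_status() surfaces a broken URL instead of a cryptic PIL error
    response = requests.get(url, stream=True)
    response.raise_for_status()
    return Image.open(response.raw)


# usage sketch with one of the example URLs from the commit
background = load_image_from_url("https://media.roboflow.com/spaces/doge-2-image.png")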
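
With run_on_click=True and example caching left off, gr.Examples reruns the wired function each time a row is clicked, and each example row must match the inputs list positionally. A toy, self-contained sketch of that wiring, independent of this Space's process function:

import gradio as gr


def echo(prompt: str) -> str:
    # stand-in for the Space's real `process` function
    return prompt.upper()


with gr.Blocks() as demo:
    prompt = gr.Text(label="Inpainting prompt")
    result = gr.Text(label="Result")
    # run_on_click=True executes fn live on selection instead of
    # requiring pre-cached outputs (cache_examples stays disabled)
    gr.Examples(
        fn=echo,
        examples=[["little lion"], ["tribal tattoos"]],
        inputs=[prompt],
        outputs=[result],
        run_on_click=True)

demo.launch()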
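
The @spaces.GPU decorator requests a ZeroGPU slot for the decorated call, with duration as the per-call time budget in seconds. A minimal sketch of that usage; run_flux_stub and its body are placeholders, not the Space's real pipeline call:

import spaces
import torch


@spaces.GPU(duration=50)  # per-call GPU budget of roughly 50 seconds
def run_flux_stub(prompt: str) -> str:
    # placeholder body; the real run_flux invokes the FLUX inpainting pipeline
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"would run on {device}: {prompt}"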