sdafd committed
Commit f51843b · verified · 1 Parent(s): 06c50c4

Update app.py

Files changed (1):
  app.py +115 -63
app.py CHANGED
@@ -1,103 +1,155 @@
+ import torch
+ from diffusers import FluxPipeline
  import gradio as gr
  import threading
  import os
- import torch

  os.environ["OMP_NUM_THREADS"] = str(os.cpu_count())
  torch.set_num_threads(os.cpu_count())

- model1 = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
- model2 = gr.load("models/Purz/face-projection")
+ # Initialize Flux pipeline
+ pipe = FluxPipeline.from_pretrained(
+     "black-forest-labs/FLUX.1-dev",
+     torch_dtype=torch.bfloat16
+ )
+ pipe.enable_model_cpu_offload()

  stop_event = threading.Event()

- def generate_images(text, selected_model, steps, cfg_scale, seed, width, height):
+ def generate_images(
+     prompt,
+     height,
+     width,
+     guidance_scale,
+     num_inference_steps,
+     max_sequence_length,
+     seed,
+     randomize_seed
+ ):
      stop_event.clear()
-
-     if selected_model == "Model 1 (Turbo Realism)":
-         model = model1
-     elif selected_model == "Model 2 (Face Projection)":
-         model = model2
-     else:
-         return ["Invalid model selection."] * 3
-
-     # Convert seed to integer (handle empty/None)
-     try:
-         seed = int(seed) if seed not in [None, ""] else -1
-     except:
-         seed = -1
-
      results = []
+
      for i in range(3):
          if stop_event.is_set():
-             return ["Image generation stopped by user."] * 3
-
-         modified_text = f"{text} variation {i+1}"
-         result = model(
-             modified_text,
-             #num_inference_steps=int(steps),
-             #guidance_scale=float(cfg_scale),
+             return [None] * 3
+
+         # Handle seed randomization
+         if randomize_seed:
+             current_seed = torch.randint(0, 2**32 - 1, (1,)).item()
+         else:
+             current_seed = seed + i
+
+         generator = torch.Generator(device="cpu").manual_seed(current_seed)
+
+         # Generate image with current parameters
+         image = pipe(
+             prompt=prompt,
              height=int(height),
              width=int(width),
-             seed=seed if seed != -1 else None
-         )
-         results.append(result)
-
+             guidance_scale=guidance_scale,
+             num_inference_steps=int(num_inference_steps),
+             max_sequence_length=int(max_sequence_length),
+             generator=generator
+         ).images[0]
+
+         results.append(image)
+
      return results

  def stop_generation():
-     """Stops the ongoing image generation by setting the stop_event flag."""
      stop_event.set()
-     return ["Generation stopped."] * 3
+     return [None] * 3

  with gr.Blocks() as interface:
-     gr.Markdown(
-         "### Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."
-     )
+     gr.Markdown("""
+     ### FLUX Image Generation
+     Adjust parameters below to control the image generation process
+     """)

      with gr.Row():
-         text_input = gr.Textbox(label="Prompt", placeholder="Type your imagination here...")
-         model_selector = gr.Radio(
-             ["Model 1 (Turbo Realism)", "Model 2 (Face Projection)"],
-             label="Model Selection",
-             value="Model 1 (Turbo Realism)"
+         text_input = gr.Textbox(
+             label="Prompt",
+             placeholder="Describe what you want to generate...",
+             scale=3
          )

-     with gr.Accordion("Advanced Parameters", open=False):
+     with gr.Accordion("Generation Parameters", open=False):
          with gr.Row():
-             steps = gr.Slider(1, 150, value=25, label="Inference Steps", info="(20-50 for quality/speed balance)")
-             cfg_scale = gr.Slider(1.0, 20.0, value=7.5, label="CFG Scale", info="(7-12 for good balance)")
-             seed = gr.Number(label="Seed", value=-1, precision=0, info="-1 for random")
+             height = gr.Number(
+                 label="Height",
+                 value=1024,
+                 minimum=512,
+                 maximum=4096,
+                 step=64,
+                 precision=0
+             )
+             width = gr.Number(
+                 label="Width",
+                 value=1024,
+                 minimum=512,
+                 maximum=4096,
+                 step=64,
+                 precision=0
+             )
+
+             guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.0,
+                 maximum=20.0,
+                 value=7.0,
+                 step=0.5
+             )
+
+             num_inference_steps = gr.Slider(
+                 label="Inference Steps",
+                 minimum=10,
+                 maximum=150,
+                 value=50,
+                 step=1
+             )
+
+             max_sequence_length = gr.Dropdown(
+                 label="Max Sequence Length",
+                 choices=[512, 768, 1024],
+                 value=512
+             )

          with gr.Row():
-             width = gr.Dropdown(
-                 choices=["512", "640", "768", "896", "1024"],
-                 value="512",
-                 label="Width",
-                 allow_custom_value=True
+             seed = gr.Number(
+                 label="Seed",
+                 value=42,
+                 precision=0
              )
-             height = gr.Dropdown(
-                 choices=["512", "640", "768", "896", "1024"],
-                 value="512",
-                 label="Height",
-                 allow_custom_value=True
+             randomize_seed = gr.Checkbox(
+                 label="Randomize Seed",
+                 value=True
              )
-
+
      with gr.Row():
-         generate_button = gr.Button("Generate 3 Images 🎨", variant="primary")
-         stop_button = gr.Button("Stop Generation", variant="stop")
-
+         generate_btn = gr.Button("Generate", variant="primary")
+         stop_btn = gr.Button("Stop Generation")
+
      with gr.Row():
-         output1 = gr.Image(label="Variant 1", type="pil", show_label=True)
-         output2 = gr.Image(label="Variant 2", type="pil", show_label=True)
-         output3 = gr.Image(label="Variant 3", type="pil", show_label=True)
+         output1 = gr.Image(label="Output 1", type="pil")
+         output2 = gr.Image(label="Output 2", type="pil")
+         output3 = gr.Image(label="Output 3", type="pil")

-     generate_button.click(
+     generate_btn.click(
          generate_images,
-         inputs=[text_input, model_selector, steps, cfg_scale, seed, width, height],
+         inputs=[
+             text_input,
+             height,
+             width,
+             guidance_scale,
+             num_inference_steps,
+             max_sequence_length,
+             seed,
+             randomize_seed
+         ],
          outputs=[output1, output2, output3]
      )
-     stop_button.click(
+
+     stop_btn.click(
          stop_generation,
          inputs=[],
          outputs=[output1, output2, output3]
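
For quick sanity-checking outside the Space, here is a minimal standalone sketch of the diffusers-based generation path this commit switches to. The prompt, seed, and output filename are illustrative placeholders; it assumes the same FLUX.1-dev checkpoint and enough memory for CPU offload.

import torch
from diffusers import FluxPipeline

# Same pipeline setup as the new app.py.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # reduces peak memory, as in the Space

# One generation using the defaults exposed in the UI (values are illustrative).
generator = torch.Generator(device="cpu").manual_seed(42)
image = pipe(
    prompt="a lighthouse on a cliff at sunrise",  # hypothetical prompt
    height=1024,
    width=1024,
    guidance_scale=7.0,
    num_inference_steps=50,
    max_sequence_length=512,
    generator=generator
).images[0]
image.save("output.png")

The Space wraps this same call in a loop over three seeds (seed, seed+1, seed+2 when randomization is off) to fill the three output slots.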