salomonsky committed on
Commit
908e25f
1 Parent(s): 6c6a21d

Update app.py

Files changed (1)
  1. app.py +28 -169
app.py CHANGED
@@ -15,193 +15,52 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
  basemodel = "black-forest-labs/FLUX.1-schnell"
  MAX_SEED = np.iinfo(np.int32).max

- CSS = """
- footer {
- visibility: hidden;
- }
- """
-
- JS = """function () {
- gradioURL = window.location.href
- if (!gradioURL.endsWith('?__theme=dark')) {
- window.location.replace(gradioURL + '?__theme=dark');
- }
- }"""
+ CSS = "footer {visibility: hidden;}"
+ JS = "function () {gradioURL = window.location.href;if (!gradioURL.endsWith('?__theme=dark')) {window.location.replace(gradioURL + '?__theme=dark');}}"

  def enable_lora(lora_add):
-     if not lora_add:
-         return basemodel
-     else:
-         return lora_add
+     if not lora_add: return basemodel
+     else: return lora_add

  def get_upscale_finegrain(prompt, img_path, upscale_factor):
      client = Client("finegrain/finegrain-image-enhancer")
-     result = client.predict(
-         input_image=handle_file(img_path),
-         prompt=prompt,
-         negative_prompt="",
-         seed=42,
-         upscale_factor=upscale_factor,
-         controlnet_scale=0.6,
-         controlnet_decay=1,
-         condition_scale=6,
-         tile_width=112,
-         tile_height=144,
-         denoise_strength=0.35,
-         num_inference_steps=18,
-         solver="DDIM",
-         api_name="/process"
-     )
+     result = client.predict(input_image=handle_file(img_path), prompt=prompt, negative_prompt="", seed=42, upscale_factor=upscale_factor, controlnet_scale=0.6, controlnet_decay=1, condition_scale=6, tile_width=112, tile_height=144, denoise_strength=0.35, num_inference_steps=18, solver="DDIM", api_name="/process")
      return result[1]

- async def generate_image(
-     prompt:str,
-     model:str,
-     lora_word:str,
-     width:int=768,
-     height:int=1024,
-     scales:float=3.5,
-     steps:int=24,
-     seed:int=-1
- ):
-
-     if seed == -1:
-         seed = random.randint(0, MAX_SEED)
+ async def generate_image(prompt, model, lora_word, width, height, scales, steps, seed):
+     if seed == -1: seed = random.randint(0, MAX_SEED)
      seed = int(seed)
-     print(f'prompt:{prompt}')
-
      text = str(translator.translate(prompt, 'English')) + "," + lora_word
-
      client = AsyncInferenceClient()
-     try:
-         image = await client.text_to_image(
-             prompt=text,
-             height=height,
-             width=width,
-             guidance_scale=scales,
-             num_inference_steps=steps,
-             model=model,
-         )
-     except Exception as e:
-         raise gr.Error(f"Error in {e}")
-
+     try: image = await client.text_to_image(prompt=text, height=height, width=width, guidance_scale=scales, num_inference_steps=steps, model=model)
+     except Exception as e: raise gr.Error(f"Error in {e}")
      return image, seed

- async def gen(
-     prompt:str,
-     lora_add:str="",
-     lora_word:str="",
-     width:int=768,
-     height:int=1024,
-     scales:float=3.5,
-     steps:int=24,
-     seed:int=-1,
-     progress=gr.Progress(track_tqdm=True),
-     upscale_factor:int=0
- ):
+ async def gen(prompt, lora_add, lora_word, width, height, scales, steps, seed, upscale_factor):
      model = enable_lora(lora_add)
-     print(model)
-     image, seed = await generate_image(prompt,model,lora_word,width,height,scales,steps,seed)
+     image, seed = await generate_image(prompt, model, lora_word, width, height, scales, steps, seed)
      if upscale_factor != 0:
-         image = get_upscale_finegrain(prompt, image, upscale_factor)
-     return image, seed, image
-
- def upscale_image(img_path, upscale_factor, prompt):
-     if upscale_factor == 0:
-         return img_path
-     else:
-         return get_upscale_finegrain(prompt, img_path, upscale_factor)
+         upscaled_image = get_upscale_finegrain(prompt, image, upscale_factor)
+         combined_image = Image.new('RGB', (image.width + upscaled_image.width, image.height))
+         combined_image.paste(image, (0, 0))
+         combined_image.paste(upscaled_image, (image.width, 0))
+         return combined_image, seed
+     else: return image, seed

  with gr.Blocks(css=CSS, js=JS, theme="Nymbo/Nymbo_Theme") as demo:
      gr.HTML("<h1><center>Flux Lab Light</center></h1>")
      with gr.Row():
          with gr.Column(scale=4):
-             with gr.Row():
-                 img = gr.Image(type="filepath", label='Flux Generated Image', height=600)
-             with gr.Row():
-                 prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6)
-                 sendBtn = gr.Button(scale=1, variant='primary')
+             with gr.Row(): img = gr.Image(type="filepath", label='Comparison Image', height=600)
+             with gr.Row(): prompt = gr.Textbox(label='Enter Your Prompt (Multi-Languages)', placeholder="Enter prompt...", scale=6); sendBtn = gr.Button(scale=1, variant='primary')
          with gr.Accordion("Advanced Options", open=True):
              with gr.Column(scale=1):
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=512,
-                     maximum=1280,
-                     step=8,
-                     value=768,
-                 )
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=512,
-                     maximum=1280,
-                     step=8,
-                     value=1024,
-                 )
-                 scales = gr.Slider(
-                     label="Guidance",
-                     minimum=3.5,
-                     maximum=7,
-                     step=0.1,
-                     value=3.5,
-                 )
-                 steps = gr.Slider(
-                     label="Steps",
-                     minimum=1,
-                     maximum=100,
-                     step=1,
-                     value=24,
-                 )
-                 seed = gr.Slider(
-                     label="Seeds",
-                     minimum=-1,
-                     maximum=MAX_SEED,
-                     step=1,
-                     value=-1,
-                 )
-                 lora_add = gr.Textbox(
-                     label="Add Flux LoRA",
-                     info="Copy the HF LoRA model name here",
-                     lines=1,
-                     placeholder="Please use Warm status model",
-                 )
-                 lora_word = gr.Textbox(
-                     label="Add Flux LoRA Trigger Word",
-                     info="Add the Trigger Word",
-                     lines=1,
-                     value="",
-                 )
-                 upscale_factor = gr.Radio(
-                     label="UpScale Factor",
-                     choices=[
-                         0,
-                         2,
-                         3,
-                         4
-                     ],
-                     value=0,
-                     scale=2
-                 )
-                 output_res = gr.Image(label="Upscaled Image")
-
-     gr.on(
-         triggers=[
-             prompt.submit,
-             sendBtn.click,
-         ],
-         fn=gen,
-         inputs=[
-             prompt,
-             lora_add,
-             lora_word,
-             width,
-             height,
-             scales,
-             steps,
-             seed,
-             upscale_factor
-         ],
-         outputs=[img, seed, output_res]
-     )
-
- if __name__ == "__main__":
-     demo.queue(api_open=False).launch(show_api=False, share=False)
+                 width = gr.Slider(label="Width", minimum=512, maximum=1280, step=8, value=768)
+                 height = gr.Slider(label="Height", minimum=512, maximum=1280, step=8, value=1024)
+                 scales = gr.Slider(label="Guidance", minimum=3.5, maximum=7, step=0.1, value=3.5)
+                 steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=24)
+                 seed = gr.Slider(label="Seeds", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
+                 lora_add = gr.Textbox(label="Add Flux LoRA", info="Copy the HF LoRA model name here", lines=1, placeholder="Please use Warm status model")
+                 lora_word = gr.Textbox(label="Add Flux LoRA Trigger Word", info="Add the Trigger Word", lines=1, value="")
+                 upscale_factor = gr.Radio(label="UpScale Factor", choices=[0, 2, 3, 4], value=0, scale=2)
+     gr.on(triggers=[prompt.submit, sendBtn.click], fn=gen, inputs=[prompt, lora_add, lora_word, width
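
A note on the compositing step in the new gen(): AsyncInferenceClient.text_to_image returns a PIL.Image, while the gradio_client call in get_upscale_finegrain typically returns a local file path for image outputs (result[1]), so the upscaled result would normally need to be opened before its .width/.height can be read. Below is a minimal sketch of that side-by-side composite under that assumption; combine_side_by_side is a hypothetical helper for illustration and is not part of this commit.

from PIL import Image

def combine_side_by_side(generated: Image.Image, upscaled_path: str) -> Image.Image:
    # Hypothetical helper (illustration only): open the file path returned by
    # gradio_client before reading .width/.height, then paste both images onto
    # a single canvas, generated image on the left, upscaled on the right.
    upscaled = Image.open(upscaled_path).convert("RGB")
    canvas = Image.new("RGB", (generated.width + upscaled.width, max(generated.height, upscaled.height)))
    canvas.paste(generated, (0, 0))
    canvas.paste(upscaled, (generated.width, 0))
    return canvas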