openfree committed on
Commit
298154a
1 Parent(s): f7b3404

Update app.py

Files changed (1)
  1. app.py +1 -686
app.py CHANGED
@@ -1,687 +1,2 @@
  import os
- import gradio as gr
- import json
- import logging
- import torch
- from PIL import Image
- import spaces
- from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForImage2Image
- from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
- from diffusers.utils import load_image
- from huggingface_hub import hf_hub_download, HfFileSystem, ModelCard, snapshot_download
- import copy
- import random
- import time
- import requests
- import pandas as pd
- from transformers import pipeline
- from gradio_imageslider import ImageSlider
- import numpy as np
- import warnings
-
-
- huggingface_token = os.getenv("HF_TOKEN")
-
-
- translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")
-
-
-
- #Load prompts for randomization
- df = pd.read_csv('prompts.csv', header=None)
- prompt_values = df.values.flatten()
-
- # Load LoRAs from JSON file
- with open('loras.json', 'r') as f:
-     loras = json.load(f)
-
- # Initialize the base model
- dtype = torch.bfloat16
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
-
- # Load the shared FLUX model
- base_model = "black-forest-labs/FLUX.1-dev"
- pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
-
- # Setup for LoRA use
- taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
- good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
-
- # Image-to-Image pipeline setup
- pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-     base_model,
-     vae=good_vae,
-     transformer=pipe.transformer,
-     text_encoder=pipe.text_encoder,
-     tokenizer=pipe.tokenizer,
-     text_encoder_2=pipe.text_encoder_2,
-     tokenizer_2=pipe.tokenizer_2,
-     torch_dtype=dtype
- ).to(device)
-
- MAX_SEED = 2**32 - 1
- MAX_PIXEL_BUDGET = 1024 * 1024
-
- pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
-
- class calculateDuration:
-     def __init__(self, activity_name=""):
-         self.activity_name = activity_name
-
-     def __enter__(self):
-         self.start_time = time.time()
-         return self
-
-     def __exit__(self, exc_type, exc_value, traceback):
-         self.end_time = time.time()
-         self.elapsed_time = self.end_time - self.start_time
-         if self.activity_name:
-             print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
-         else:
-             print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
-
- def download_file(url, directory=None):
-     if directory is None:
-         directory = os.getcwd()  # Use current working directory if not specified
-
-     # Get the filename from the URL
-     filename = url.split('/')[-1]
-
-     # Full path for the downloaded file
-     filepath = os.path.join(directory, filename)
-
-     # Download the file
-     response = requests.get(url)
-     response.raise_for_status()  # Raise an exception for bad status codes
-
-     # Write the content to the file
-     with open(filepath, 'wb') as file:
-         file.write(response.content)
-
-     return filepath
-
- def update_selection(evt: gr.SelectData, selected_indices, loras_state, width, height):
-     selected_index = evt.index
-     selected_indices = selected_indices or []
-     if selected_index in selected_indices:
-         selected_indices.remove(selected_index)
-     else:
-         if len(selected_indices) < 3:
-             selected_indices.append(selected_index)
-         else:
-             gr.Warning("You can select up to 3 LoRAs, remove one to select a new one.")
-             return gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), width, height, gr.update(), gr.update(), gr.update()
-
-     selected_info_1 = "Select LoRA 1"
-     selected_info_2 = "Select LoRA 2"
-     selected_info_3 = "Select LoRA 3"
-
-     lora_scale_1 = 1.15
-     lora_scale_2 = 1.15
-     lora_scale_3 = 1.15
-     lora_image_1 = None
-     lora_image_2 = None
-     lora_image_3 = None
-
-     if len(selected_indices) >= 1:
-         lora1 = loras_state[selected_indices[0]]
-         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
-         lora_image_1 = lora1['image']
-     if len(selected_indices) >= 2:
-         lora2 = loras_state[selected_indices[1]]
-         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
-         lora_image_2 = lora2['image']
-     if len(selected_indices) >= 3:
-         lora3 = loras_state[selected_indices[2]]
-         selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}](https://huggingface.co/{lora3['repo']}) ✨"
-         lora_image_3 = lora3['image']
-
-     if selected_indices:
-         last_selected_lora = loras_state[selected_indices[-1]]
-         new_placeholder = f"Type a prompt for {last_selected_lora['title']}"
-     else:
-         new_placeholder = "Type a prompt after selecting a LoRA"
-
-     return gr.update(placeholder=new_placeholder), selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, width, height, lora_image_1, lora_image_2, lora_image_3
-
- def remove_lora(selected_indices, loras_state, index_to_remove):
-     if len(selected_indices) > index_to_remove:
-         selected_indices.pop(index_to_remove)
-
-     selected_info_1 = "Select LoRA 1"
-     selected_info_2 = "Select LoRA 2"
-     selected_info_3 = "Select LoRA 3"
-     lora_scale_1 = 1.15
-     lora_scale_2 = 1.15
-     lora_scale_3 = 1.15
-     lora_image_1 = None
-     lora_image_2 = None
-     lora_image_3 = None
-
-     for i, idx in enumerate(selected_indices):
-         lora = loras_state[idx]
-         if i == 0:
-             selected_info_1 = f"### LoRA 1 Selected: [{lora['title']}]({lora['repo']}) ✨"
-             lora_image_1 = lora['image']
-         elif i == 1:
-             selected_info_2 = f"### LoRA 2 Selected: [{lora['title']}]({lora['repo']}) ✨"
-             lora_image_2 = lora['image']
-         elif i == 2:
-             selected_info_3 = f"### LoRA 3 Selected: [{lora['title']}]({lora['repo']}) ✨"
-             lora_image_3 = lora['image']
-
-     return selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3
-
- def remove_lora_1(selected_indices, loras_state):
-     return remove_lora(selected_indices, loras_state, 0)
-
- def remove_lora_2(selected_indices, loras_state):
-     return remove_lora(selected_indices, loras_state, 1)
-
- def remove_lora_3(selected_indices, loras_state):
-     return remove_lora(selected_indices, loras_state, 2)
-
- def randomize_loras(selected_indices, loras_state):
-     try:
-         if len(loras_state) < 3:
-             raise gr.Error("Not enough LoRAs to randomize.")
-         selected_indices = random.sample(range(len(loras_state)), 3)
-         lora1 = loras_state[selected_indices[0]]
-         lora2 = loras_state[selected_indices[1]]
-         lora3 = loras_state[selected_indices[2]]
-         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}](https://huggingface.co/{lora1['repo']}) ✨"
-         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}](https://huggingface.co/{lora2['repo']}) ✨"
-         selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}](https://huggingface.co/{lora3['repo']}) ✨"
-         lora_scale_1 = 1.15
-         lora_scale_2 = 1.15
-         lora_scale_3 = 1.15
-         lora_image_1 = lora1.get('image', 'path/to/default/image.png')
-         lora_image_2 = lora2.get('image', 'path/to/default/image.png')
-         lora_image_3 = lora3.get('image', 'path/to/default/image.png')
-         random_prompt = random.choice(prompt_values)
-         return selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3, random_prompt
-     except Exception as e:
-         print(f"Error in randomize_loras: {str(e)}")
-         return "Error", "Error", "Error", [], 1.15, 1.15, 1.15, 'path/to/default/image.png', 'path/to/default/image.png', 'path/to/default/image.png', ""
-
- def add_custom_lora(custom_lora, selected_indices, current_loras):
-     if custom_lora:
-         try:
-             title, repo, path, trigger_word, image = check_custom_model(custom_lora)
-             print(f"Loaded custom LoRA: {repo}")
-             existing_item_index = next((index for (index, item) in enumerate(current_loras) if item['repo'] == repo), None)
-             if existing_item_index is None:
-                 if repo.endswith(".safetensors") and repo.startswith("http"):
-                     repo = download_file(repo)
-                 new_item = {
-                     "image": image if image else "/home/user/app/custom.png",
-                     "title": title,
-                     "repo": repo,
-                     "weights": path,
-                     "trigger_word": trigger_word
-                 }
-                 print(f"New LoRA: {new_item}")
-                 existing_item_index = len(current_loras)
-                 current_loras.append(new_item)
-
-             # Update gallery
-             gallery_items = [(item["image"], item["title"]) for item in current_loras]
-             # Update selected_indices if there's room
-             if len(selected_indices) < 3:
-                 selected_indices.append(existing_item_index)
-             else:
-                 gr.Warning("You can select up to 3 LoRAs, remove one to select a new one.")
-
-             # Update selected_info and images
-             selected_info_1 = "Select a LoRA 1"
-             selected_info_2 = "Select a LoRA 2"
-             selected_info_3 = "Select a LoRA 3"
-             lora_scale_1 = 1.15
-             lora_scale_2 = 1.15
-             lora_scale_3 = 1.15
-             lora_image_1 = None
-             lora_image_2 = None
-             lora_image_3 = None
-             if len(selected_indices) >= 1:
-                 lora1 = current_loras[selected_indices[0]]
-                 selected_info_1 = f"### LoRA 1 Selected: {lora1['title']} ✨"
-                 lora_image_1 = lora1['image'] if lora1['image'] else None
-             if len(selected_indices) >= 2:
-                 lora2 = current_loras[selected_indices[1]]
-                 selected_info_2 = f"### LoRA 2 Selected: {lora2['title']} ✨"
-                 lora_image_2 = lora2['image'] if lora2['image'] else None
-             if len(selected_indices) >= 3:
-                 lora3 = current_loras[selected_indices[2]]
-                 selected_info_3 = f"### LoRA 3 Selected: {lora3['title']} ✨"
-                 lora_image_3 = lora3['image'] if lora3['image'] else None
-             print("Finished adding custom LoRA")
-             return (
-                 current_loras,
-                 gr.update(value=gallery_items),
-                 selected_info_1,
-                 selected_info_2,
-                 selected_info_3,
-                 selected_indices,
-                 lora_scale_1,
-                 lora_scale_2,
-                 lora_scale_3,
-                 lora_image_1,
-                 lora_image_2,
-                 lora_image_3
-             )
-         except Exception as e:
-             print(e)
-             gr.Warning(str(e))
-             return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
-     else:
-         return current_loras, gr.update(), gr.update(), gr.update(), gr.update(), selected_indices, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
-
- def remove_custom_lora(selected_indices, current_loras):
-     if current_loras:
-         custom_lora_repo = current_loras[-1]['repo']
-         # Remove from loras list
-         current_loras = current_loras[:-1]
-         # Remove from selected_indices if selected
-         custom_lora_index = len(current_loras)
-         if custom_lora_index in selected_indices:
-             selected_indices.remove(custom_lora_index)
-     # Update gallery
-     gallery_items = [(item["image"], item["title"]) for item in current_loras]
-     # Update selected_info and images
-     selected_info_1 = "Select a LoRA 1"
-     selected_info_2 = "Select a LoRA 2"
-     selected_info_3 = "Select a LoRA 3"
-     lora_scale_1 = 1.15
-     lora_scale_2 = 1.15
-     lora_scale_3 = 1.15
-     lora_image_1 = None
-     lora_image_2 = None
-     lora_image_3 = None
-     if len(selected_indices) >= 1:
-         lora1 = current_loras[selected_indices[0]]
-         selected_info_1 = f"### LoRA 1 Selected: [{lora1['title']}]({lora1['repo']}) ✨"
-         lora_image_1 = lora1['image']
-     if len(selected_indices) >= 2:
-         lora2 = current_loras[selected_indices[1]]
-         selected_info_2 = f"### LoRA 2 Selected: [{lora2['title']}]({lora2['repo']}) ✨"
-         lora_image_2 = lora2['image']
-     if len(selected_indices) >= 3:
-         lora3 = current_loras[selected_indices[2]]
-         selected_info_3 = f"### LoRA 3 Selected: [{lora3['title']}]({lora3['repo']}) ✨"
-         lora_image_3 = lora3['image']
-     return (
-         current_loras,
-         gr.update(value=gallery_items),
-         selected_info_1,
-         selected_info_2,
-         selected_info_3,
-         selected_indices,
-         lora_scale_1,
-         lora_scale_2,
-         lora_scale_3,
-         lora_image_1,
-         lora_image_2,
-         lora_image_3
-     )
-
- @spaces.GPU(duration=75)
- def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress):
-     print("Generating image...")
-     pipe.to("cuda")
-     generator = torch.Generator(device="cuda").manual_seed(seed)
-     with calculateDuration("Generating image"):
-         # Generate image
-         for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
-             prompt=prompt_mash,
-             num_inference_steps=steps,
-             guidance_scale=cfg_scale,
-             width=width,
-             height=height,
-             generator=generator,
-             joint_attention_kwargs={"scale": 1.0},
-             output_type="pil",
-             good_vae=good_vae,
-         ):
-             yield img
-
- @spaces.GPU(duration=75)
- def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, seed):
-     pipe_i2i.to("cuda")
-     generator = torch.Generator(device="cuda").manual_seed(seed)
-     image_input = load_image(image_input_path)
-     final_image = pipe_i2i(
-         prompt=prompt_mash,
-         image=image_input,
-         strength=image_strength,
-         num_inference_steps=steps,
-         guidance_scale=cfg_scale,
-         width=width,
-         height=height,
-         generator=generator,
-         joint_attention_kwargs={"scale": 1.0},
-         output_type="pil",
-     ).images[0]
-     return final_image
-
- def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
-     try:
-         # Detect Korean text and translate it (this part is kept as-is)
-         if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
-             translated = translator(prompt, max_length=512)[0]['translation_text']
-             print(f"Original prompt: {prompt}")
-             print(f"Translated prompt: {translated}")
-             prompt = translated
-
-         if not selected_indices:
-             raise gr.Error("You must select at least one LoRA before proceeding.")
-
-         selected_loras = [loras_state[idx] for idx in selected_indices]
-
-         # Build the prompt with trigger words (this part is kept as-is)
-         prepends = []
-         appends = []
-         for lora in selected_loras:
-             trigger_word = lora.get('trigger_word', '')
-             if trigger_word:
-                 if lora.get("trigger_position") == "prepend":
-                     prepends.append(trigger_word)
-                 else:
-                     appends.append(trigger_word)
-         prompt_mash = " ".join(prepends + [prompt] + appends)
-         print("Prompt Mash: ", prompt_mash)
-
-         # Unload previous LoRA weights
-         with calculateDuration("Unloading LoRA"):
-             pipe.unload_lora_weights()
-             pipe_i2i.unload_lora_weights()
-
-         print(f"Active adapters before loading: {pipe.get_active_adapters()}")
-
-         # Load LoRA weights with respective scales
-         lora_names = []
-         lora_weights = []
-         with calculateDuration("Loading LoRA weights"):
-             for idx, lora in enumerate(selected_loras):
-                 try:
-                     lora_name = f"lora_{idx}"
-                     lora_path = lora['repo']
-                     weight_name = lora.get("weights")
-                     print(f"Loading LoRA {lora_name} from {lora_path}")
-                     if image_input is not None:
-                         if weight_name:
-                             pipe_i2i.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=lora_name)
-                         else:
-                             pipe_i2i.load_lora_weights(lora_path, adapter_name=lora_name)
-                     else:
-                         if weight_name:
-                             pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=lora_name)
-                         else:
-                             pipe.load_lora_weights(lora_path, adapter_name=lora_name)
-                     lora_names.append(lora_name)
-                     lora_weights.append(lora_scale_1 if idx == 0 else lora_scale_2 if idx == 1 else lora_scale_3)
-                 except Exception as e:
-                     print(f"Failed to load LoRA {lora_name}: {str(e)}")
-
-         print("Loaded LoRAs:", lora_names)
-         print("Adapter weights:", lora_weights)
-
-         if lora_names:
-             if image_input is not None:
-                 pipe_i2i.set_adapters(lora_names, adapter_weights=lora_weights)
-             else:
-                 pipe.set_adapters(lora_names, adapter_weights=lora_weights)
-         else:
-             print("No LoRAs were successfully loaded.")
-             return None, seed, gr.update(visible=False)
-
-         print(f"Active adapters after loading: {pipe.get_active_adapters()}")
-
-         # Image generation logic starts here (this part is kept as-is)
-         with calculateDuration("Randomizing seed"):
-             if randomize_seed:
-                 seed = random.randint(0, MAX_SEED)
-
-         if image_input is not None:
-             final_image = generate_image_to_image(prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, seed)
-         else:
-             image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, progress)
-             final_image = None
-             step_counter = 0
-             for image in image_generator:
-                 step_counter += 1
-                 final_image = image
-                 progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
-                 yield image, seed, gr.update(value=progress_bar, visible=True)
-
-         if final_image is None:
-             raise Exception("Failed to generate image")
-
-         return final_image, seed, gr.update(visible=False)
-
-     except Exception as e:
-         print(f"Error in run_lora: {str(e)}")
-         return None, seed, gr.update(visible=False)
-
- run_lora.zerogpu = True
-
- def get_huggingface_safetensors(link):
-     split_link = link.split("/")
-     if len(split_link) == 2:
-         model_card = ModelCard.load(link)
-         base_model = model_card.data.get("base_model")
-         print(f"Base model: {base_model}")
-         if base_model not in ["black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"]:
-             raise Exception("Not a FLUX LoRA!")
-         image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
-         trigger_word = model_card.data.get("instance_prompt", "")
-         image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
-         fs = HfFileSystem()
-         safetensors_name = None
-         try:
-             list_of_files = fs.ls(link, detail=False)
-             for file in list_of_files:
-                 if file.endswith(".safetensors"):
-                     safetensors_name = file.split("/")[-1]
-                 if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
-                     image_elements = file.split("/")
-                     image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
-         except Exception as e:
-             print(e)
-             raise gr.Error("Invalid Hugging Face repository with a *.safetensors LoRA")
-         if not safetensors_name:
-             raise gr.Error("No *.safetensors file found in the repository")
-         return split_link[1], link, safetensors_name, trigger_word, image_url
-     else:
-         raise gr.Error("Invalid Hugging Face repository link")
-
- def check_custom_model(link):
-     if link.endswith(".safetensors"):
-         # Treat as direct link to the LoRA weights
-         title = os.path.basename(link)
-         repo = link
-         path = None  # No specific weight name
-         trigger_word = ""
-         image_url = None
-         return title, repo, path, trigger_word, image_url
-     elif link.startswith("https://"):
-         if "huggingface.co" in link:
-             link_split = link.split("huggingface.co/")
-             return get_huggingface_safetensors(link_split[1])
-         else:
-             raise Exception("Unsupported URL")
-     else:
-         # Assume it's a Hugging Face model path
-         return get_huggingface_safetensors(link)
-
- def update_history(new_image, history):
-     """Updates the history gallery with the new image."""
-     if history is None:
-         history = []
-     if new_image is not None:
-         history.insert(0, new_image)
-     return history
-
- css = '''
- #gen_btn{height: 100%}
- #title{text-align: center}
- #title h1{font-size: 3em; display:inline-flex; align-items:center}
- #title img{width: 100px; margin-right: 0.25em}
- #gallery .grid-wrap{height: 5vh}
- #lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
- .custom_lora_card{margin-bottom: 1em}
- .card_internal{display: flex;height: 100px;margin-top: .5em}
- .card_internal img{margin-right: 1em}
- .styler{--form-gap-width: 0px !important}
- #progress{height:30px}
- #progress .generating{display:none}
- .progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
- .progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
- #component-8, .button_total{height: 100%; align-self: stretch;}
- #loaded_loras [data-testid="block-info"]{font-size:80%}
- #custom_lora_structure{background: var(--block-background-fill)}
- #custom_lora_btn{margin-top: auto;margin-bottom: 11px}
- #random_btn{font-size: 300%}
- #component-11{align-self: stretch;}
- footer {visibility: hidden;}
- '''
-
- with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css, delete_cache=(60, 3600)) as app:
-     loras_state = gr.State(loras)
-     selected_indices = gr.State([])
-
-     with gr.Row():
-         with gr.Column(scale=3):
-             prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Type a prompt after selecting a LoRA")
-         with gr.Column(scale=1):
-             generate_button = gr.Button("Generate", variant="primary", elem_classes=["button_total"])
-
-     with gr.Row(elem_id="loaded_loras"):
-         with gr.Column(scale=1, min_width=25):
-             randomize_button = gr.Button("🎲", variant="secondary", scale=1, elem_id="random_btn")
-         with gr.Column(scale=8):
-             with gr.Row():
-                 with gr.Column(scale=0, min_width=50):
-                     lora_image_1 = gr.Image(label="LoRA 1 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
-                 with gr.Column(scale=3, min_width=100):
-                     selected_info_1 = gr.Markdown("Select a LoRA 1")
-                 with gr.Column(scale=5, min_width=50):
-                     lora_scale_1 = gr.Slider(label="LoRA 1 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                 with gr.Row():
-                     remove_button_1 = gr.Button("Remove", size="sm")
-
-         with gr.Column(scale=8):
-             with gr.Row():
-                 with gr.Column(scale=0, min_width=50):
-                     lora_image_2 = gr.Image(label="LoRA 2 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
-                 with gr.Column(scale=3, min_width=100):
-                     selected_info_2 = gr.Markdown("Select a LoRA 2")
-                 with gr.Column(scale=5, min_width=50):
-                     lora_scale_2 = gr.Slider(label="LoRA 2 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                 with gr.Row():
-                     remove_button_2 = gr.Button("Remove", size="sm")
-
-         with gr.Column(scale=8):
-             with gr.Row():
-                 with gr.Column(scale=0, min_width=50):
-                     lora_image_3 = gr.Image(label="LoRA 3 Image", interactive=False, min_width=50, width=50, show_label=False, show_share_button=False, show_download_button=False, show_fullscreen_button=False, height=50)
-                 with gr.Column(scale=3, min_width=100):
-                     selected_info_3 = gr.Markdown("Select a LoRA 3")
-                 with gr.Column(scale=5, min_width=50):
-                     lora_scale_3 = gr.Slider(label="LoRA 3 Scale", minimum=0, maximum=3, step=0.01, value=1.15)
-                 with gr.Row():
-                     remove_button_3 = gr.Button("Remove", size="sm")
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Group():
-                 with gr.Row(elem_id="custom_lora_structure"):
-                     custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path or *.safetensors public URL", placeholder="ginipick/flux-lora-eric-cat", scale=3, min_width=150)
-                     add_custom_lora_button = gr.Button("Add Custom LoRA", elem_id="custom_lora_btn", scale=2, min_width=150)
-                     remove_custom_lora_button = gr.Button("Remove Custom LoRA", visible=False)
-                 gr.Markdown("[Check the list of FLUX LoRAs](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
-             gallery = gr.Gallery(
-                 [(item["image"], item["title"]) for item in loras],
-                 label="Or pick from the LoRA Explorer gallery",
-                 allow_preview=False,
-                 columns=4,
-                 elem_id="gallery"
-             )
-         with gr.Column():
-             progress_bar = gr.Markdown(elem_id="progress", visible=False)
-             result = gr.Image(label="Generated Image", interactive=False)
-             with gr.Accordion("History", open=False):
-                 history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", interactive=False)
-
-     with gr.Row():
-         with gr.Accordion("Advanced Settings", open=False):
-             with gr.Row():
-                 input_image = gr.Image(label="Input image", type="filepath")
-                 image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
-             with gr.Column():
-                 with gr.Row():
-                     cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
-                     steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
-                 with gr.Row():
-                     width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
-                     height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
-                 with gr.Row():
-                     randomize_seed = gr.Checkbox(True, label="Randomize seed")
-                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
-
-     gallery.select(
-         update_selection,
-         inputs=[selected_indices, loras_state, width, height],
-         outputs=[prompt, selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, width, height, lora_image_1, lora_image_2, lora_image_3]
-     )
-
-     remove_button_1.click(
-         remove_lora_1,
-         inputs=[selected_indices, loras_state],
-         outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3]
-     )
-
-     remove_button_2.click(
-         remove_lora_2,
-         inputs=[selected_indices, loras_state],
-         outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3]
-     )
-
-     remove_button_3.click(
-         remove_lora_3,
-         inputs=[selected_indices, loras_state],
-         outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3]
-     )
-
-     randomize_button.click(
-         randomize_loras,
-         inputs=[selected_indices, loras_state],
-         outputs=[selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3, prompt]
-     )
-
-     add_custom_lora_button.click(
-         add_custom_lora,
-         inputs=[custom_lora, selected_indices, loras_state],
-         outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3]
-     )
-
-     remove_custom_lora_button.click(
-         remove_custom_lora,
-         inputs=[selected_indices, loras_state],
-         outputs=[loras_state, gallery, selected_info_1, selected_info_2, selected_info_3, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, lora_image_1, lora_image_2, lora_image_3]
-     )
-
-     gr.on(
-         triggers=[generate_button.click, prompt.submit],
-         fn=run_lora,
-         inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, lora_scale_3, randomize_seed, seed, width, height, loras_state],
-         outputs=[result, seed, progress_bar]
-     ).then(
-         fn=lambda x, history: update_history(x, history) if x is not None else history,
-         inputs=[result, history_gallery],
-         outputs=history_gallery,
-     )
-
- if __name__ == "__main__":
-     app.queue(max_size=20)
-     app.launch(debug=True)
+ exec(os.environ.get('APP'))