ruslanmv committed · Commit 18d3f26 · verified · 1 Parent(s): 93cdc3b

Update app.py

Files changed (1)
  app.py  +723 -14

app.py CHANGED
@@ -1,24 +1,733 @@
-# app.py
 import os
 
-from flux_app.frontend import Frontend
-from flux_app.backend import ModelManager
 
-def main():
-    # Get the Hugging Face token from an environment variable
     hf_token = os.environ.get("HF_TOKEN")
     if not hf_token:
         raise ValueError("Hugging Face token (HF_TOKEN) not found in environment variables. Please set it.")
-
     model_manager = ModelManager(hf_token=hf_token)
     frontend = Frontend(model_manager)
     app = frontend.create_ui()
-
-    # Enable request queuing. (No extra keyword arguments here.)
     app.queue()
-
-    # Launch the app with the specified server configuration.
-    app.launch(server_name="0.0.0.0", server_port=7860, share=False)
-
-if __name__ == "__main__":
-    main()
+
+
+##############################
+# ===== Standard Imports =====
+##############################
 import os
+import sys
+import time
+import random
+import json
+from math import floor
+from typing import Any, Dict, List, Optional, Union
+
+import torch
+import numpy as np
+import requests
+from PIL import Image
+
+# Diffusers imports
+from diffusers import (
+    DiffusionPipeline,
+    AutoencoderTiny,
+    AutoencoderKL,
+    AutoPipelineForImage2Image,
+)
+from diffusers.utils import load_image
+
+# Hugging Face Hub
+from huggingface_hub import ModelCard, HfFileSystem
+
+# Gradio (UI)
+import gradio as gr
+
+##############################
+# ===== config.py =====
+##############################
+# Configuration parameters
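+# (DEVICE falls back to CPU when no CUDA device is available; the pipelines below
+# will still load, but FLUX.1-dev inference on CPU is impractically slow.)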
+DTYPE = torch.bfloat16
+DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+BASE_MODEL = "black-forest-labs/FLUX.1-dev"
+TAEF1_MODEL = "madebyollin/taef1"
+MAX_SEED = 2**32 - 1
+
+##############################
+# ===== utilities.py =====
+##############################
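+# calculate_shift() linearly interpolates the scheduler's dynamic-shift parameter `mu`
+# between base_shift and max_shift as a function of the image sequence length;
+# retrieve_timesteps() forwards custom timesteps/sigmas to the scheduler and returns
+# the timestep schedule actually used.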
+def calculate_shift(
+    image_seq_len,
+    base_seq_len: int = 256,
+    max_seq_len: int = 4096,
+    base_shift: float = 0.5,
+    max_shift: float = 1.16,
+):
+    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+    b = base_shift - m * base_seq_len
+    mu = image_seq_len * m + b
+    return mu
+
+def retrieve_timesteps(
+    scheduler,
+    num_inference_steps: Optional[int] = None,
+    device: Optional[Union[str, torch.device]] = None,
+    timesteps: Optional[List[int]] = None,
+    sigmas: Optional[List[float]] = None,
+    **kwargs,
+):
+    if timesteps is not None and sigmas is not None:
+        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+    if timesteps is not None:
+        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    elif sigmas is not None:
+        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+        num_inference_steps = len(timesteps)
+    else:
+        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+        timesteps = scheduler.timesteps
+    return timesteps, num_inference_steps
+
+def load_image_from_path(image_path: str):
+    """Loads an image from a given file path."""
+    return load_image(image_path)
+
+def randomize_seed_if_needed(randomize_seed: bool, seed: int, max_seed: int) -> int:
+    """Randomizes the seed if requested."""
+    if randomize_seed:
+        return random.randint(0, max_seed)
+    return seed
+
+class calculateDuration:
+    def __init__(self, activity_name=""):
+        self.activity_name = activity_name
+
+    def __enter__(self):
+        self.start_time = time.time()
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.end_time = time.time()
+        self.elapsed_time = self.end_time - self.start_time
+        if self.activity_name:
+            print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds")
+        else:
+            print(f"Elapsed time: {self.elapsed_time:.6f} seconds")
+
+##############################
+# ===== enhance.py =====
+##############################
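+# generate() calls an external chat-completions endpoint with stream=True and parses the
+# SSE "data:" lines, yielding the accumulated enhanced prompt as it arrives so the UI can
+# display it incrementally. On request errors it yields a single error string instead.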
+def generate(message, max_new_tokens=256, temperature=0.9, top_p=0.95, repetition_penalty=1.0):
+    """
+    Generates an enhanced prompt using a streaming Hugging Face API.
+    Enhances the given prompt to under 100 words without changing its essence.
+    """
+    SYSTEM_PROMPT = (
+        "You are a prompt enhancer and your work is to enhance the given prompt under 100 words "
+        "without changing the essence, only write the enhanced prompt and nothing else."
+    )
+    timestamp = time.time()
+    formatted_prompt = (
+        f"<s>[INST] SYSTEM: {SYSTEM_PROMPT} [/INST]"
+        f"[INST] {message} {timestamp} [/INST]"
+    )
+
+    api_url = "https://ruslanmv-hf-llm-api.hf.space/api/v1/chat/completions"
+    headers = {"Content-Type": "application/json"}
+
+    payload = {
+        "model": "mixtral-8x7b",
+        "messages": [{"role": "user", "content": formatted_prompt}],
+        "temperature": temperature,
+        "top_p": top_p,
+        "max_tokens": max_new_tokens,
+        "use_cache": False,
+        "stream": True
+    }
+
+    try:
+        response = requests.post(api_url, headers=headers, json=payload, stream=True)
+        response.raise_for_status()
+        full_output = ""
+
+        for line in response.iter_lines():
+            if not line:
+                continue
+            decoded_line = line.decode("utf-8").strip()
+            if decoded_line.startswith("data:"):
+                decoded_line = decoded_line[len("data:"):].strip()
+            if decoded_line == "[DONE]":
+                break
+            try:
+                json_data = json.loads(decoded_line)
+                for choice in json_data.get("choices", []):
+                    delta = choice.get("delta", {})
+                    content = delta.get("content", "")
+                    full_output += content
+                    yield full_output
+                    if choice.get("finish_reason") == "stop":
+                        return
+            except json.JSONDecodeError:
+                continue
+    except requests.exceptions.RequestException as e:
+        yield f"Error during generation: {str(e)}"
+
+##############################
+# ===== lora_handling.py =====
+##############################
+# A default list of LoRAs for the UI (this would normally be loaded from a separate module)
+loras = [
+    {"image": "placeholder.jpg", "title": "Placeholder LoRA", "repo": "placeholder/repo", "weights": None, "trigger_word": ""}
+]
+
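+# flux_pipe_call_that_returns_an_iterable_of_images is a generator variant of the FLUX
+# pipeline __call__: at every denoising step it decodes the current latents with the fast
+# TAEF1 autoencoder and yields a preview image, then decodes the final latents once more
+# with the full-quality VAE (`good_vae`) for the last yielded image.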
+@torch.inference_mode()
+def flux_pipe_call_that_returns_an_iterable_of_images(
+    self,
+    prompt: Union[str, List[str]] = None,
+    prompt_2: Optional[Union[str, List[str]]] = None,
+    height: Optional[int] = None,
+    width: Optional[int] = None,
+    num_inference_steps: int = 28,
+    timesteps: List[int] = None,
+    guidance_scale: float = 3.5,
+    num_images_per_prompt: Optional[int] = 1,
+    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+    latents: Optional[torch.FloatTensor] = None,
+    prompt_embeds: Optional[torch.FloatTensor] = None,
+    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+    output_type: Optional[str] = "pil",
+    return_dict: bool = True,
+    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+    max_sequence_length: int = 512,
+    good_vae: Optional[Any] = None,
+):
+    height = height or self.default_sample_size * self.vae_scale_factor
+    width = width or self.default_sample_size * self.vae_scale_factor
+
+    self.check_inputs(
+        prompt,
+        prompt_2,
+        height,
+        width,
+        prompt_embeds=prompt_embeds,
+        pooled_prompt_embeds=pooled_prompt_embeds,
+        max_sequence_length=max_sequence_length,
+    )
+
+    self._guidance_scale = guidance_scale
+    self._joint_attention_kwargs = joint_attention_kwargs
+    self._interrupt = False
+
+    batch_size = 1 if isinstance(prompt, str) else len(prompt)
+    device = self._execution_device
+
+    lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+    prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+        prompt=prompt,
+        prompt_2=prompt_2,
+        prompt_embeds=prompt_embeds,
+        pooled_prompt_embeds=pooled_prompt_embeds,
+        device=device,
+        num_images_per_prompt=num_images_per_prompt,
+        max_sequence_length=max_sequence_length,
+        lora_scale=lora_scale,
+    )
+
+    num_channels_latents = self.transformer.config.in_channels // 4
+    latents, latent_image_ids = self.prepare_latents(
+        batch_size * num_images_per_prompt,
+        num_channels_latents,
+        height,
+        width,
+        prompt_embeds.dtype,
+        device,
+        generator,
+        latents,
+    )
+
+    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+    image_seq_len = latents.shape[1]
+    mu = calculate_shift(
+        image_seq_len,
+        self.scheduler.config.base_image_seq_len,
+        self.scheduler.config.max_image_seq_len,
+        self.scheduler.config.base_shift,
+        self.scheduler.config.max_shift,
+    )
+    timesteps, num_inference_steps = retrieve_timesteps(
+        self.scheduler,
+        num_inference_steps,
+        device,
+        timesteps,
+        sigmas,
+        mu=mu,
+    )
+    self._num_timesteps = len(timesteps)
+
+    guidance = (torch.full([1], guidance_scale, device=device, dtype=torch.float32)
+                .expand(latents.shape[0])
+                if self.transformer.config.guidance_embeds else None)
+
+    for i, t in enumerate(timesteps):
+        if self.interrupt:
+            continue
+
+        timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+        noise_pred = self.transformer(
+            hidden_states=latents,
+            timestep=timestep / 1000,
+            guidance=guidance,
+            pooled_projections=pooled_prompt_embeds,
+            encoder_hidden_states=prompt_embeds,
+            txt_ids=text_ids,
+            img_ids=latent_image_ids,
+            joint_attention_kwargs=self.joint_attention_kwargs,
+            return_dict=False,
+        )[0]
+
+        latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+        latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+        image = self.vae.decode(latents_for_image, return_dict=False)[0]
+        yield self.image_processor.postprocess(image, output_type=output_type)[0]
+        latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+        torch.cuda.empty_cache()
+
+    latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+    latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
+    image = good_vae.decode(latents, return_dict=False)[0]
+    self.maybe_free_model_hooks()
+    torch.cuda.empty_cache()
+    yield self.image_processor.postprocess(image, output_type=output_type)[0]
+
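+# get_huggingface_safetensors() resolves a "user/repo" LoRA id through its model card: it
+# verifies the base model is FLUX.1-dev or FLUX.1-schnell, then scans the repo for a
+# *.safetensors weight file and a preview image, returning
+# (title, repo, weight filename, trigger word, image URL).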
+def get_huggingface_safetensors(link: str) -> tuple:
+    split_link = link.split("/")
+    if len(split_link) == 2:
+        model_card = ModelCard.load(link)
+        base_model = model_card.data.get("base_model")
+        print(base_model)
+
+        if base_model not in ("black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"):
+            raise Exception("Flux LoRA Not Found!")
+
+        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+        trigger_word = model_card.data.get("instance_prompt", "")
+        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+        fs = HfFileSystem()
+        try:
+            list_of_files = fs.ls(link, detail=False)
+            for file in list_of_files:
+                if file.endswith(".safetensors"):
+                    safetensors_name = file.split("/")[-1]
+                if not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp")):
+                    image_elements = file.split("/")
+                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+            return split_link[1], link, safetensors_name, trigger_word, image_url
+        except Exception as e:
+            print(e)
+            raise Exception("You didn't include a link or a valid Hugging Face repository with a *.safetensors LoRA")
+    else:
+        raise Exception("You didn't include a link or a valid Hugging Face repository with a *.safetensors LoRA")
+
+def check_custom_model(link: str) -> tuple:
+    if link.startswith("https://"):
+        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
+            link_split = link.split("huggingface.co/")
+            return get_huggingface_safetensors(link_split[1])
+    return get_huggingface_safetensors(link)
+
+def create_lora_card(title: str, repo: str, trigger_word: str, image: str) -> str:
+    trigger_word_info = (
+        f"Using: <code><b>{trigger_word}</b></code> as the trigger word"
+        if trigger_word
+        else "No trigger word found. If there's a trigger word, include it in your prompt"
+    )
+    return f'''
+    <div class="custom_lora_card">
+        <span>Loaded custom LoRA:</span>
+        <div class="card_internal">
+            <img src="{image}" />
+            <div>
+                <h3>{title}</h3>
+                <small>{trigger_word_info}<br></small>
+            </div>
+        </div>
+    </div>
+    '''
+
+def add_custom_lora(custom_lora: str, loras_list: list) -> tuple:
+    if custom_lora:
+        try:
+            title, repo, path, trigger_word, image = check_custom_model(custom_lora)
+            print(f"Loaded custom LoRA: {repo}")
+            card = create_lora_card(title, repo, trigger_word, image)
 
+            existing_item_index = next((index for (index, item) in enumerate(loras_list) if item['repo'] == repo), None)
+            if existing_item_index is None:
+                new_item = {
+                    "image": image,
+                    "title": title,
+                    "repo": repo,
+                    "weights": path,
+                    "trigger_word": trigger_word
+                }
+                print(new_item)
+                loras_list.append(new_item)
+                existing_item_index = len(loras_list) - 1
 
+            return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
+
+        except Exception as e:
+            print(f"Error loading LoRA: {e}")
+            return gr.update(visible=True, value="Invalid LoRA"), gr.update(visible=False), gr.update(), "", None, ""
+    else:
+        return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
+
+def remove_custom_lora() -> tuple:
+    return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
+
+def prepare_prompt(prompt: str, selected_index: Optional[int], loras_list: list) -> str:
+    if selected_index is None:
+        raise gr.Error("You must select a LoRA before proceeding.🧨")
+
+    selected_lora = loras_list[selected_index]
+    trigger_word = selected_lora.get("trigger_word")
+    if trigger_word:
+        trigger_position = selected_lora.get("trigger_position", "append")
+        if trigger_position == "prepend":
+            prompt_mash = f"{trigger_word} {prompt}"
+        else:
+            prompt_mash = f"{prompt} {trigger_word}"
+    else:
+        prompt_mash = prompt
+    return prompt_mash
+
+def unload_lora_weights(pipe, pipe_i2i):
+    if pipe is not None:
+        pipe.unload_lora_weights()
+    if pipe_i2i is not None:
+        pipe_i2i.unload_lora_weights()
+
+def load_lora_weights_into_pipeline(pipe_to_use, lora_path: str, weight_name: Optional[str]):
+    pipe_to_use.load_lora_weights(
+        lora_path,
+        weight_name=weight_name,
+        low_cpu_mem_usage=True
+    )
+
+def update_selection(evt: gr.SelectData, width, height, loras_list):
+    selected_lora = loras_list[evt.index]
+    new_placeholder = f"Type a prompt for {selected_lora['title']}"
+    lora_repo = selected_lora["repo"]
+    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
+    if "aspect" in selected_lora:
+        if selected_lora["aspect"] == "portrait":
+            width = 768
+            height = 1024
+        elif selected_lora["aspect"] == "landscape":
+            width = 1024
+            height = 768
+        else:
+            width = 1024
+            height = 1024
+    return (
+        gr.update(placeholder=new_placeholder),
+        updated_text,
+        evt.index,
+        width,
+        height,
+    )
+
+##############################
+# ===== backend.py =====
+##############################
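+# ModelManager owns both pipelines: `pipe` (text-to-image) is built with the tiny TAEF1
+# autoencoder so intermediate previews decode quickly, while `pipe_i2i` (image-to-image)
+# shares the same transformer and text encoders but uses the full-quality VAE. The
+# generator method bound below streams preview images as denoising progresses.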
+class ModelManager:
+    def __init__(self, hf_token=None):
+        self.hf_token = hf_token
+        self.pipe = None
+        self.pipe_i2i = None
+        self.good_vae = None
+        self.taef1 = None
+        self.initialize_models()
+
+    def initialize_models(self):
+        """Initializes the diffusion pipelines and autoencoders."""
+        self.taef1 = AutoencoderTiny.from_pretrained(TAEF1_MODEL, torch_dtype=DTYPE).to(DEVICE)
+        self.good_vae = AutoencoderKL.from_pretrained(BASE_MODEL, subfolder="vae", torch_dtype=DTYPE).to(DEVICE)
+        # Optionally, if your model is private, you can pass `use_auth_token=self.hf_token` here.
+        self.pipe = DiffusionPipeline.from_pretrained(BASE_MODEL, torch_dtype=DTYPE, vae=self.taef1)
+        self.pipe = self.pipe.to(DEVICE)
+        self.pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
+            BASE_MODEL,
+            vae=self.good_vae,
+            transformer=self.pipe.transformer,
+            text_encoder=self.pipe.text_encoder,
+            tokenizer=self.pipe.tokenizer,
+            text_encoder_2=self.pipe.text_encoder_2,
+            tokenizer_2=self.pipe.tokenizer_2,
+            torch_dtype=DTYPE,
+        ).to(DEVICE)
+        # Bind the custom LoRA call to the pipeline.
+        self.pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(self.pipe)
+
+    def generate_image(self, prompt_mash, steps, seed, cfg_scale, width, height, lora_scale):
+        """Generates an image using the text-to-image pipeline."""
+        self.pipe.to(DEVICE)
+        generator = torch.Generator(device=DEVICE).manual_seed(seed)
+        with calculateDuration("Generating image"):
+            for img in self.pipe.flux_pipe_call_that_returns_an_iterable_of_images(
+                prompt=prompt_mash,
+                num_inference_steps=steps,
+                guidance_scale=cfg_scale,
+                width=width,
+                height=height,
+                generator=generator,
+                joint_attention_kwargs={"scale": lora_scale},
+                output_type="pil",
+                good_vae=self.good_vae,
+            ):
+                yield img
+
+    def generate_image_to_image(self, prompt_mash, image_input_path, image_strength, steps, cfg_scale, width, height, lora_scale, seed):
+        """Generates an image using the image-to-image pipeline."""
+        generator = torch.Generator(device=DEVICE).manual_seed(seed)
+        self.pipe_i2i.to(DEVICE)
+        image_input = load_image_from_path(image_input_path)
+        with calculateDuration("Generating image to image"):
+            final_image = self.pipe_i2i(
+                prompt=prompt_mash,
+                image=image_input,
+                strength=image_strength,
+                num_inference_steps=steps,
+                guidance_scale=cfg_scale,
+                width=width,
+                height=height,
+                generator=generator,
+                joint_attention_kwargs={"scale": lora_scale},
+                output_type="pil",
+            ).images[0]
+        return final_image
+
+##############################
+# ===== frontend.py =====
+##############################
+# The original code used a decorator from a module named `spaces`.
+# If unavailable, we define a dummy decorator.
+try:
+    import spaces
+except ImportError:
+    class spaces:
+        @staticmethod
+        def GPU(duration):
+            def decorator(func):
+                return func
+            return decorator
+
+class Frontend:
+    def __init__(self, model_manager: ModelManager):
+        self.model_manager = model_manager
+        self.loras = loras  # Use the default LoRA list defined above.
+        self.load_initial_loras()
+        self.css = self.define_css()
+
+    def define_css(self):
+        # Clean and professional CSS styling.
+        return '''
+        /* Title Styling */
+        #title {
+            text-align: center;
+            margin-bottom: 20px;
+        }
+        #title h1 {
+            font-size: 2.5rem;
+            margin: 0;
+            color: #333;
+        }
+        /* Button and Column Styling */
+        #gen_btn {
+            width: 100%;
+            padding: 12px;
+            font-weight: bold;
+            border-radius: 5px;
+        }
+        #gen_column {
+            display: flex;
+            align-items: center;
+            justify-content: center;
+        }
+        /* Gallery and List Styling */
+        #gallery .grid-wrap {
+            margin-top: 15px;
+        }
+        #lora_list {
+            background-color: #f5f5f5;
+            padding: 10px;
+            border-radius: 4px;
+            font-size: 0.9rem;
+        }
+        .card_internal {
+            display: flex;
+            align-items: center;
+            height: 100px;
+            margin-top: 10px;
+        }
+        .card_internal img {
+            margin-right: 10px;
+        }
+        .styler {
+            --form-gap-width: 0px !important;
+        }
+        /* Progress Bar Styling */
+        .progress-container {
+            width: 100%;
+            height: 20px;
+            background-color: #e0e0e0;
+            border-radius: 10px;
+            overflow: hidden;
+            margin-bottom: 20px;
+        }
+        .progress-bar {
+            height: 100%;
+            background-color: #4f46e5;
+            transition: width 0.3s ease-in-out;
+            width: calc(var(--current) / var(--total) * 100%);
+        }
+        '''
+
+    def load_initial_loras(self):
+        try:
+            from lora import loras as loras_list
+            self.loras = loras_list
+        except ImportError:
+            print("Warning: lora.py not found, using placeholder LoRAs.")
+            pass
+
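+    # run_lora() is a generator: it yields (image, seed, progress-bar update, enhanced prompt)
+    # tuples so Gradio can stream intermediate previews and the step counter while generation
+    # is still running; the final yield hides the progress bar and returns the finished image.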
+    @spaces.GPU(duration=300)
+    def run_lora(self, prompt, image_input, image_strength, cfg_scale, steps, selected_index,
+                 randomize_seed, seed, width, height, lora_scale, use_enhancer,
+                 progress=gr.Progress(track_tqdm=True)):
+        seed = randomize_seed_if_needed(randomize_seed, seed, MAX_SEED)
+        # Prepare the prompt using the selected LoRA trigger word.
+        prompt_mash = prepare_prompt(prompt, selected_index, self.loras)
+        enhanced_text = ""
+
+        # Optionally enhance the prompt.
+        if use_enhancer:
+            for enhanced_chunk in generate(prompt_mash):
+                enhanced_text = enhanced_chunk
+                yield None, seed, gr.update(visible=False), enhanced_text
+            prompt_mash = enhanced_text
+        else:
+            enhanced_text = ""
+
+        selected_lora = self.loras[selected_index]
+        unload_lora_weights(self.model_manager.pipe, self.model_manager.pipe_i2i)
+        pipe_to_use = self.model_manager.pipe_i2i if image_input is not None else self.model_manager.pipe
+        load_lora_weights_into_pipeline(pipe_to_use, selected_lora["repo"], selected_lora.get("weights"))
+
+        if image_input is not None:
+            final_image = self.model_manager.generate_image_to_image(
+                prompt_mash, image_input, image_strength, steps, cfg_scale, width, height, lora_scale, seed
+            )
+            yield final_image, seed, gr.update(visible=False), enhanced_text
+        else:
+            image_generator = self.model_manager.generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale)
+            final_image = None
+            step_counter = 0
+            for image in image_generator:
+                step_counter += 1
+                final_image = image
+                progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
+                yield image, seed, gr.update(value=progress_bar, visible=True), enhanced_text
+            yield final_image, seed, gr.update(value=progress_bar, visible=False), enhanced_text
+
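+    # create_ui() lays out the Blocks app: the LoRA gallery and custom-LoRA textbox on the left,
+    # the result image and progress bar on the right, and an Advanced Settings accordion for
+    # image-to-image input, CFG, steps, size, seed, LoRA scale and the prompt-enhancer toggles;
+    # gallery selection, custom-LoRA entry and the Generate button are wired to the handlers above.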
+    def create_ui(self):
+        with gr.Blocks(theme=gr.themes.Base(), css=self.css, title="Flux LoRA Generation") as app:
+            title = gr.HTML(
+                """<h1>Flux LoRA Generation</h1>""",
+                elem_id="title",
+            )
+            selected_index = gr.State(None)
+
+            with gr.Row():
+                with gr.Column(scale=3):
+                    prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Choose the LoRA and type the prompt")
+                with gr.Column(scale=1, elem_id="gen_column"):
+                    generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
+            with gr.Row():
+                with gr.Column():
+                    selected_info = gr.Markdown("")
+                    gallery = gr.Gallery(
+                        [(item["image"], item["title"]) for item in self.loras],
+                        label="LoRA Collection",
+                        allow_preview=False,
+                        columns=3,
+                        elem_id="gallery",
+                        show_share_button=False
+                    )
+                    with gr.Group():
+                        custom_lora = gr.Textbox(label="Enter Custom LoRA", placeholder="prithivMLmods/Canopus-LoRA-Flux-Anime")
+                        gr.Markdown("[Check the list of FLUX LoRA's](https://huggingface.co/models?other=base_model:adapter:black-forest-labs/FLUX.1-dev)", elem_id="lora_list")
+                    custom_lora_info = gr.HTML(visible=False)
+                    custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
+                with gr.Column():
+                    progress_bar = gr.Markdown(elem_id="progress", visible=False)
+                    result = gr.Image(label="Generated Image")
+
+            with gr.Row():
+                with gr.Accordion("Advanced Settings", open=False):
+                    with gr.Row():
+                        input_image = gr.Image(label="Input image", type="filepath")
+                        image_strength = gr.Slider(label="Denoise Strength", info="Lower means more image influence", minimum=0.1, maximum=1.0, step=0.01, value=0.75)
+                    with gr.Column():
+                        with gr.Row():
+                            cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=3.5)
+                            steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=28)
+                        with gr.Row():
+                            width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
+                            height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1024)
+                        with gr.Row():
+                            randomize_seed = gr.Checkbox(True, label="Randomize seed")
+                            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
+                            lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=3, step=0.01, value=0.95)
+                        with gr.Row():
+                            use_enhancer = gr.Checkbox(value=False, label="Use Prompt Enhancer")
+                            show_enhanced_prompt = gr.Checkbox(value=False, label="Display Enhanced Prompt")
+                        enhanced_prompt_box = gr.Textbox(label="Enhanced Prompt", visible=False)
+
+            gallery.select(
+                update_selection,
+                inputs=[width, height, gr.State(self.loras)],
+                outputs=[prompt, selected_info, selected_index, width, height]
+            )
+            custom_lora.input(
+                add_custom_lora,
+                inputs=[custom_lora, gr.State(self.loras)],
+                outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
+            )
+            custom_lora_button.click(
+                remove_custom_lora,
+                outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
+            )
+
+            show_enhanced_prompt.change(fn=lambda show: gr.update(visible=show),
+                                        inputs=show_enhanced_prompt,
+                                        outputs=enhanced_prompt_box)
+
+            gr.on(
+                triggers=[generate_button.click, prompt.submit],
+                fn=self.run_lora,
+                inputs=[prompt, input_image, image_strength, cfg_scale, steps, selected_index,
+                        randomize_seed, seed, width, height, lora_scale, use_enhancer],
+                outputs=[result, seed, progress_bar, enhanced_prompt_box]
+            )
+
+            with gr.Row():
+                gr.HTML("<div style='text-align:center; font-size:0.9em; margin-top:20px;'>Credits: <a href='https://ruslanmv.com' target='_blank'>ruslanmv.com</a></div>")
+
+        return app
+
+##############################
+# ===== Main app.py =====
+##############################
+if __name__ == "__main__":
+    # Get the Hugging Face token from the environment.
     hf_token = os.environ.get("HF_TOKEN")
     if not hf_token:
         raise ValueError("Hugging Face token (HF_TOKEN) not found in environment variables. Please set it.")
     model_manager = ModelManager(hf_token=hf_token)
     frontend = Frontend(model_manager)
     app = frontend.create_ui()
     app.queue()
+    app.launch()