Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,5 +1,3 @@
-
-
 ##############################
 # ===== Standard Imports =====
 ##############################
@@ -10,12 +8,21 @@ import random
 import json
 from math import floor
 from typing import Any, Dict, List, Optional, Union
-
+
+# Local import for default LoRA list (if available)
+try:
+    from flux_app.lora import loras
+except ImportError:
+    loras = [
+        {"image": "placeholder.jpg", "title": "Placeholder LoRA", "repo": "placeholder/repo", "weights": None, "trigger_word": ""}
+    ]
+
 import torch
 import numpy as np
 import requests
 from PIL import Image
 import spaces
+
 # Diffusers imports
 from diffusers import (
     DiffusionPipeline,
@@ -28,14 +35,12 @@ from diffusers.utils import load_image
 # Hugging Face Hub
 from huggingface_hub import ModelCard, HfFileSystem
 
-
 # Gradio (UI)
 import gradio as gr
 
 ##############################
 # ===== config.py =====
 ##############################
-# Configuration parameters
 DTYPE = torch.bfloat16
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 BASE_MODEL = "black-forest-labs/FLUX.1-dev"
@@ -167,7 +172,7 @@ def generate(message, max_new_tokens=256, temperature=0.9, top_p=0.95, repetitio
 ##############################
 # ===== lora_handling.py =====
 ##############################
-# A default list of LoRAs for the UI
+# A default list of LoRAs for the UI
 loras = [
     {"image": "placeholder.jpg", "title": "Placeholder LoRA", "repo": "placeholder/repo", "weights": None, "trigger_word": ""}
 ]
@@ -446,7 +451,7 @@ class ModelManager:
         """Initializes the diffusion pipelines and autoencoders."""
         self.taef1 = AutoencoderTiny.from_pretrained(TAEF1_MODEL, torch_dtype=DTYPE).to(DEVICE)
         self.good_vae = AutoencoderKL.from_pretrained(BASE_MODEL, subfolder="vae", torch_dtype=DTYPE).to(DEVICE)
-        # Optionally,
+        # Optionally, pass use_auth_token=self.hf_token if needed.
         self.pipe = DiffusionPipeline.from_pretrained(BASE_MODEL, torch_dtype=DTYPE, vae=self.taef1)
         self.pipe = self.pipe.to(DEVICE)
         self.pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
@@ -459,8 +464,9 @@ class ModelManager:
             tokenizer_2=self.pipe.tokenizer_2,
             torch_dtype=DTYPE,
         ).to(DEVICE)
-        #
-
+        # Instead of binding to the instance (which fails due to __slots__),
+        # bind the custom method to the pipeline’s class.
+        self.pipe.__class__.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images
 
     @spaces.GPU(duration=100)
     def generate_image(self, prompt_mash, steps, seed, cfg_scale, width, height, lora_scale):
@@ -504,8 +510,6 @@ class ModelManager:
 ##############################
 # ===== frontend.py =====
 ##############################
-
-
 class Frontend:
     def __init__(self, model_manager: ModelManager):
         self.model_manager = model_manager
@@ -722,4 +726,5 @@ if __name__ == "__main__":
     frontend = Frontend(model_manager)
     app = frontend.create_ui()
     app.queue()
-
+    # Set share=True to create a public link if desired.
+    app.launch(share=False, debug=True)
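A few notes on the changes above. The initializer loads two autoencoders: AutoencoderTiny (TAEF1) is wired into the pipeline as its VAE, while the full AutoencoderKL is kept around as good_vae. Presumably the tiny decoder gives cheap intermediate previews (the custom flux_pipe_call_that_returns_an_iterable_of_images suggests streamed images) and the full VAE decodes the final frame. A rough sketch of that split, assuming the commonly used madebyollin/taef1 checkpoint for TAEF1_MODEL and glossing over FLUX latent unpacking and scaling:

# Rough sketch only: repo ids and decode details are assumptions, and real FLUX
# latents need unpacking/scaling before either decode call.
import torch
from diffusers import AutoencoderKL, AutoencoderTiny

DTYPE = torch.bfloat16
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=DTYPE)
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=DTYPE
)

def decode_preview(latents: torch.Tensor) -> torch.Tensor:
    # Cheap, lower-fidelity decode used while denoising is still running.
    return taef1.decode(latents).sample

def decode_final(latents: torch.Tensor) -> torch.Tensor:
    # Slower, higher-quality decode for the finished image.
    return good_vae.decode(latents).sample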
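The most interesting change is binding flux_pipe_call_that_returns_an_iterable_of_images to self.pipe.__class__ instead of the instance. When a class declares __slots__, its instances have no __dict__, so assigning a brand-new attribute on an instance raises AttributeError; assigning the function to the class works, and it then behaves like an ordinary method. A minimal sketch of that general Python behavior, with toy names rather than the diffusers classes:

# Toy illustration of the __slots__ issue behind the class-level binding.
class SlottedPipeline:
    __slots__ = ("unet",)  # no per-instance __dict__, so unknown attributes cannot be set

    def __init__(self):
        self.unet = "a unet"

def custom_call(self):
    return f"running with {self.unet}"

pipe = SlottedPipeline()

try:
    pipe.custom_call = custom_call          # instance binding fails
except AttributeError as err:
    print("instance binding failed:", err)

SlottedPipeline.custom_call = custom_call   # class-level binding works
print(pipe.custom_call())                   # -> running with a unet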
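Finally, the @spaces.GPU(duration=100) decorator is the ZeroGPU pattern that goes with the Space's "Running on Zero" status: the decorated call is granted a GPU only while it executes, for at most roughly the requested number of seconds. A hedged sketch of the pattern; run_inference and its arguments are illustrative, not this app's exact signature:

# Illustrative ZeroGPU usage; function name, arguments, and step count are assumptions.
import spaces
import torch

@spaces.GPU(duration=100)  # GPU is attached only for this call
def run_inference(pipe, prompt: str):
    with torch.inference_mode():
        return pipe(prompt=prompt, num_inference_steps=28).images[0]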