Spaces:
Runtime error
Runtime error
add: ponix generator
Browse files- __pycache__/live_preview_helpers.cpython-310.pyc +0 -0
- app.py +11 -2
__pycache__/live_preview_helpers.cpython-310.pyc
ADDED
|
Binary file (4.03 kB). View file
|
|
|
app.py
CHANGED
|
@@ -7,12 +7,22 @@ from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler, Autoe
|
|
| 7 |
from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
|
| 8 |
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
|
| 9 |
|
|
|
|
|
|
|
|
|
|
| 10 |
dtype = torch.bfloat16
|
| 11 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 12 |
|
| 13 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
| 14 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
| 15 |
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
torch.cuda.empty_cache()
|
| 17 |
|
| 18 |
MAX_SEED = np.iinfo(np.int32).max
|
|
@@ -54,8 +64,7 @@ css="""
|
|
| 54 |
with gr.Blocks(css=css) as demo:
|
| 55 |
|
| 56 |
with gr.Column(elem_id="col-container"):
|
| 57 |
-
gr.Markdown(f"""#
|
| 58 |
-
12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
|
| 59 |
[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
|
| 60 |
""")
|
| 61 |
|
|
|
|
| 7 |
from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
|
| 8 |
from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
|
| 9 |
|
| 10 |
+
from huggingface_hub import hf_hub_download
|
| 11 |
+
from safetensors.torch import load_file
|
| 12 |
+
|
| 13 |
dtype = torch.bfloat16
|
| 14 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 15 |
|
| 16 |
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
| 17 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
| 18 |
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype, vae=taef1).to(device)
|
| 19 |
+
|
| 20 |
+
# PONIX model load (LoRA weights + textual-inversion embeddings)
|
| 21 |
+
pipe.load_lora_weights('cwhuh/ponix-generator-v0.1.0', weight_name='pytorch_lora_weights.safetensors')
|
| 22 |
+
embedding_path = hf_hub_download(repo_id='cwhuh/ponix-generator-v0.1.0', filename='./ponix-generator-v0.1.0_emb.safetensors', repo_type="model")
|
| 23 |
+
state_dict = load_file(embedding_path)
|
| 24 |
+
pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>", "<s2>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
|
| 25 |
+
|
| 26 |
torch.cuda.empty_cache()
|
| 27 |
|
| 28 |
MAX_SEED = np.iinfo(np.int32).max
|
|
|
|
| 64 |
with gr.Blocks(css=css) as demo:
|
| 65 |
|
| 66 |
with gr.Column(elem_id="col-container"):
|
| 67 |
+
gr.Markdown(f"""# [POSTECH] PONIX Generator
|
|
|
|
| 68 |
[[non-commercial license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md)] [[blog](https://blackforestlabs.ai/announcing-black-forest-labs/)] [[model](https://huggingface.co/black-forest-labs/FLUX.1-dev)]
|
| 69 |
""")
|
| 70 |
|