import gc
import os

import torch
import torch._dynamo
from diffusers import AutoencoderTiny, FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL import Image as img
from PIL.Image import Image
from torch import Generator
from torchao.quantization import int8_weight_only, quantize_
from transformers import T5EncoderModel

from pipelines.models import TextToImageRequest

# Let the CUDA caching allocator use expandable segments to reduce
# fragmentation; this setting is read at the first CUDA allocation.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# If torch.compile fails on a graph, fall back to eager mode instead of raising.
torch._dynamo.config.suppress_errors = True

# Type alias used in the annotations below.
Pipeline = FluxPipeline

# Base checkpoint and the revision it is pinned to.
ckpt_id = "black-forest-labs/FLUX.1-schnell"
ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
|

def empty_cache():
    # Collect Python garbage, release cached CUDA blocks, and reset the
    # peak-memory counters. (reset_max_memory_allocated is a deprecated
    # alias of reset_peak_memory_stats, so a single call suffices.)
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline() -> Pipeline:
    empty_cache()

    dtype, device = torch.bfloat16, "cuda"

    # bf16 export of the T5-XXL text encoder, pinned to a known revision.
    text_encoder_2 = T5EncoderModel.from_pretrained(
        "city96/t5-v1_1-xxl-encoder-bf16",
        revision="1b9c856aadb864af93c1dcdc226c2774fa67bc86",
        torch_dtype=torch.bfloat16,
    ).to(memory_format=torch.channels_last)

    # Tiny autoencoder variant: much cheaper decoding than the full VAE.
    vae = AutoencoderTiny.from_pretrained(
        "RobertML/FLUX.1-schnell-vae_fx",
        revision="00c83cdfdfe46992eb0ed45921eee34261fcb56e",
        torch_dtype=dtype,
    )

    # Pre-quantized int8 transformer loaded straight from the local HF cache;
    # the snapshot is pickle-serialized, hence use_safetensors=False.
    path = os.path.join(
        HF_HUB_CACHE,
        "models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a",
    )
    model = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=dtype, use_safetensors=False
    ).to(memory_format=torch.channels_last)

    pipeline = FluxPipeline.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        vae=vae,
        transformer=model,
        text_encoder_2=text_encoder_2,
        torch_dtype=dtype,
    ).to(device)

    # Compile the transformer for CUDA-graph replay ("reduce-overhead") and
    # quantize the tiny VAE's weights to int8 as well.
    pipeline.transformer = torch.compile(pipeline.transformer, mode="reduce-overhead")
    quantize_(pipeline.vae, int8_weight_only())

    # Warm up with three throwaway generations on a nonsense prompt so that
    # compilation and graph capture happen before the first real request.
    for _ in range(3):
        pipeline(
            prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    empty_cache()
    return pipeline
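

# How the int8 transformer snapshot above was produced is not shown in this
# file. As a rough sketch only (an assumption, not the actual export path),
# an equivalent artifact could be created with torchao's weight-only quantizer:
def _quantize_and_save_transformer(out_dir: str) -> None:
    transformer = FluxTransformer2DModel.from_pretrained(
        ckpt_id,
        revision=ckpt_revision,
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
    )
    quantize_(transformer, int8_weight_only())
    # Quantized weights are not plain tensors, so pickle serialization is used.
    transformer.save_pretrained(out_dir, safe_serialization=False)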
|

@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
    try:
        image = pipeline(
            request.prompt,
            generator=generator,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
            height=request.height,
            width=request.width,
            output_type="pil",
        ).images[0]
    except Exception:
        # If generation fails for any reason, fall back to a bundled placeholder.
        image = img.open("./RobertML.png")
    return image
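

# Minimal usage sketch. The TextToImageRequest constructor call below is an
# assumption (infer() reads .prompt, .height, and .width); adjust it to the
# real model class in pipelines.models.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(prompt="a watercolor fox", width=1024, height=1024)  # hypothetical construction
    seed_generator = Generator(device="cuda").manual_seed(0)
    infer(request, pipe, seed_generator).save("out.png")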