"""Text-to-image serving module: loads a FluxPipeline with an int8-quantized
VAE and a torch.compile'd transformer once at startup, then serves fast
4-step generations via infer()."""

import os

import torch
import torch._dynamo
from diffusers import FluxPipeline, FluxTransformer2DModel
from huggingface_hub.constants import HF_HUB_CACHE
from PIL.Image import Image
from pipelines.models import TextToImageRequest
from torch import Generator
from torchao.quantization import quantize_, int8_weight_only

# Let the CUDA caching allocator grow segments instead of fragmenting,
# and silence the fork warning from the HF tokenizers.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
os.environ["TOKENIZERS_PARALLELISM"] = "True"

# Fall back to eager execution rather than crashing if torch.compile
# hits an unsupported op.
torch._dynamo.config.suppress_errors = True

Pipeline = FluxPipeline  # alias used in the type annotations below

MODEL_ID = "golaststep/5000"
REVISION = "1cda9987d38b4015ff99015c43a929b42607af9b"


def load_pipeline() -> Pipeline:
    # Load the transformer weights directly from the local HF cache snapshot.
    path = os.path.join(
        HF_HUB_CACHE, "models--golaststep--5000", "snapshots", REVISION, "transformer"
    )
    transformer = FluxTransformer2DModel.from_pretrained(
        path, torch_dtype=torch.bfloat16, use_safetensors=False
    )
    pipeline = FluxPipeline.from_pretrained(
        MODEL_ID,
        revision=REVISION,
        transformer=transformer,
        local_files_only=True,
        torch_dtype=torch.bfloat16,
    )
    pipeline.to("cuda")

    # Quantize the VAE weights to int8 to cut memory use and bandwidth.
    quantize_(pipeline.vae, int8_weight_only())

    # Compile the transformer (the hot path) with the most aggressive preset.
    pipeline.transformer = torch.compile(
        pipeline.transformer, mode="max-autotune", fullgraph=True
    )

    # Warm-up runs so compilation and autotuning finish before the first request.
    for _ in range(3):
        pipeline(
            prompt="insensible, timbale, pothery, electrovital, actinogram, taxis, intracerebellar, centrodesmus",
            width=1024,
            height=1024,
            guidance_scale=0.0,
            num_inference_steps=4,
            max_sequence_length=256,
        )

    return pipeline


@torch.no_grad()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    # Seed a per-request generator on the pipeline's device for reproducibility.
    generator = Generator(pipeline.device).manual_seed(request.seed)
    return pipeline(
        request.prompt,
        generator=generator,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
    ).images[0]