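"""Text-to-image serving module for FLUX.1-schnell: swaps in a tiny VAE,
compiles it with torch.compile, and relies on sequential CPU offload to keep
4-step inference within a constrained GPU memory budget."""
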
import gc

import torch
from diffusers import AutoencoderTiny, FluxPipeline
from PIL import Image
from pipelines.models import TextToImageRequest
from torch import Generator

Pipeline = FluxPipeline  # alias used in the type annotations below
MODEL_ID = "black-forest-labs/FLUX.1-schnell"
DTYPE = torch.bfloat16


def clear():
    """Free cached GPU memory and reset CUDA memory statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


@torch.inference_mode()
def load_pipeline() -> Pipeline:
    clear()
    # Distilled tiny VAE in place of the full AutoencoderKL: faster decode, less VRAM.
    vae = AutoencoderTiny.from_pretrained("golaststep/FLUX.1-schnell", torch_dtype=DTYPE)
    pipeline = FluxPipeline.from_pretrained(MODEL_ID, vae=vae, torch_dtype=DTYPE)
    # Enable cuDNN autotuning and TF32 matmuls; cap this process at 90% of VRAM.
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.cuda.set_per_process_memory_fraction(0.9)
    # channels_last memory layout favors the convolution-heavy modules.
    pipeline.text_encoder.to(memory_format=torch.channels_last)
    pipeline.text_encoder_2.to(memory_format=torch.channels_last)
    pipeline.transformer.to(memory_format=torch.channels_last)
    pipeline.vae.to(memory_format=torch.channels_last)
    pipeline.vae = torch.compile(pipeline.vae)
    # Offload all components to CPU between uses, except the compiled VAE,
    # which stays resident on the GPU.
    pipeline._exclude_from_cpu_offload = ["vae"]
    pipeline.enable_sequential_cpu_offload()
    clear()
    # One warm-up pass so torch.compile tracing and kernel autotuning happen
    # here rather than on the first real request.
    pipeline(
        prompt="unpervaded, unencumber, froggish, groundneedle, transnatural, fatherhood, outjump, cinerator",
        width=1024, height=1024, guidance_scale=0.1,
        num_inference_steps=4, max_sequence_length=256,
    )
    return pipeline


sample = True  # one-time flag: clear warm-up allocations on the first request


@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image.Image:
    global sample
    if sample:
        clear()
        sample = False
    torch.cuda.reset_peak_memory_stats()
    generator = Generator("cuda").manual_seed(request.seed)
    image = pipeline(
        request.prompt, generator=generator, guidance_scale=0.0,
        num_inference_steps=4, max_sequence_length=256,
        height=request.height, width=request.width, output_type="pil",
    ).images[0]
    return image
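

# Minimal local smoke test. Assumption: TextToImageRequest is a
# keyword-constructible data model with the prompt/seed/height/width fields
# used above; the real schema in pipelines.models may differ.
if __name__ == "__main__":
    pipe = load_pipeline()
    request = TextToImageRequest(
        prompt="a watercolor fox in the snow",
        seed=42,
        height=1024,
        width=1024,
    )
    infer(request, pipe).save("sample.png")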