AlekseyCalvin committed
Commit 2b716c4 · verified · 1 Parent(s): 74082e4

Update app.py

Files changed (1): app.py (+19 -6)
app.py CHANGED
@@ -3,18 +3,24 @@ import gradio as gr
 import json
 import logging
 import torch
-from os import path
 from PIL import Image
+from os import path
+from torchvision import transforms
+from dataclasses import dataclass
+import math
+from typing import Callable
 import spaces
-from diffusers import DiffusionPipeline, AutoPipelineForText2Image
-from diffusers import StableDiffusion3Pipeline, FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel # pip install diffusers>=0.31.0
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL, AutoPipelineForText2Image
+from diffusers import StableDiffusion3Pipeline, FlowMatchEulerDiscreteScheduler # pip install diffusers>=0.31.0
 from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
+from diffusers.models.transformers import SD3Transformer2DModel
 import copy
 import random
 import time
+import safetensors.torch
+from tqdm import tqdm
 from huggingface_hub import HfFileSystem, ModelCard
 from huggingface_hub import login, hf_hub_download
-import safetensors.torch
 from safetensors.torch import load_file
 hf_token = os.environ.get("HF_TOKEN")
 login(token=hf_token)
@@ -24,7 +30,7 @@ os.environ["TRANSFORMERS_CACHE"] = cache_path
 os.environ["HF_HUB_CACHE"] = cache_path
 os.environ["HF_HOME"] = cache_path
 
-torch.set_float32_matmul_precision("medium")
+#torch.set_float32_matmul_precision("medium")
 
 #torch._inductor.config.conv_1x1_as_mm = True
 #torch._inductor.config.coordinate_descent_tuning = True
@@ -38,7 +44,14 @@ with open('loras.json', 'r') as f:
 
 # Initialize the base model
 #base_model = "stabilityai/stable-diffusion-3.5-large"
-pipe = AutoPipelineForText2Image.from_pretrained("ariG23498/sd-3.5-merged", torch_dtype=torch.bfloat16)
+# Initialize the base model
+dtype = torch.bfloat16
+base_model = "ariG23498/sd-3.5-merged"
+pipe = AutoPipelineForText2Image.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
+#pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
+torch.cuda.empty_cache()
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
 config = CLIPConfig.from_pretrained(model_id)
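
For context, below is a minimal standalone sketch of the initialization path this commit lands on, assuming diffusers>=0.31.0 (per the inline comment) and access to the referenced Hugging Face repos. It is a reconstruction under stated assumptions, not the Space's full app.py: the prompt and sampler settings are illustrative placeholders; "import os" is added because the hunk above only brings in "from os import path" while app.py calls os.environ.get; and the pipeline is moved to the computed device rather than the hard-coded "cuda" that the diff applies before device is defined.

    import os

    import torch
    from diffusers import AutoPipelineForText2Image
    from huggingface_hub import login

    # app.py reads the token from the Space's environment; skip login if unset.
    hf_token = os.environ.get("HF_TOKEN")
    if hf_token:
        login(token=hf_token)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.bfloat16

    # The commit replaces the inline repo string with named variables and moves
    # the pipeline onto the GPU immediately after loading.
    base_model = "ariG23498/sd-3.5-merged"
    pipe = AutoPipelineForText2Image.from_pretrained(base_model, torch_dtype=dtype).to(device)
    if device == "cuda":
        torch.cuda.empty_cache()

    # Hypothetical prompt and settings, for illustration only.
    image = pipe(
        "a watercolor fox in a birch forest",
        num_inference_steps=28,
        guidance_scale=4.5,
    ).images[0]
    image.save("out.png")

Note that the commit keeps the AutoencoderTiny VAE swap commented out; the referenced madebyollin/taef1 checkpoint is the tiny autoencoder trained for FLUX.1 rather than SD3, so enabling that line as-is would likely mismatch the pipeline's VAE.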