Commit 7d7b9fe · verified · 1 Parent(s): 2b554aa
AlekseyCalvin committed

Update app.py

Files changed (1):
  app.py +19 -8
app.py CHANGED
@@ -24,7 +24,7 @@ os.environ["TRANSFORMERS_CACHE"] = cache_path
 os.environ["HF_HUB_CACHE"] = cache_path
 os.environ["HF_HOME"] = cache_path
 
-torch.set_float32_matmul_precision("high")
+torch.set_float32_matmul_precision("medium")
 
 #torch._inductor.config.conv_1x1_as_mm = True
 #torch._inductor.config.coordinate_descent_tuning = True
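This hunk lowers the float32 matmul precision tier from "high" to "medium", trading a little matmul accuracy for speed. A minimal sketch of what the PyTorch setting controls (the documented tiers, nothing specific to this app):

```python
import torch

# "highest": full float32 accumulation (slowest, most accurate)
# "high":    TF32 (or bfloat16 three-pass) kernels where available
# "medium":  bfloat16-internal kernels (fastest, least precise)
torch.set_float32_matmul_precision("medium")
```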
@@ -40,6 +40,16 @@ with open('loras.json', 'r') as f:
 #base_model = "stabilityai/stable-diffusion-3.5-large"
 pipe = AutoPipelineForText2Image.from_pretrained("ariG23498/sd-3.5-merged", torch_dtype=torch.bfloat16)
 
+model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
+config = CLIPConfig.from_pretrained(model_id)
+config.text_config.max_position_embeddings = 248
+clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True)
+clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248)
+pipe.tokenizer = clip_processor.tokenizer
+pipe.text_encoder = clip_model.text_model
+pipe.tokenizer_max_length = 248
+pipe.text_encoder.dtype = torch.bfloat16
+
 #clipmodel = 'norm'
 #if clipmodel == "long":
 # model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
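The ten added lines activate the LongCLIP option sketched in the commented block below them: they swap the pipeline's CLIP-L tokenizer and text encoder for zer0int/LongCLIP-GmP-ViT-L-14, raising the prompt window from CLIP's 77 tokens to 248 (ignore_mismatched_sizes=True tolerates the enlarged position-embedding table against the patched config). This assumes CLIPConfig, CLIPModel, and CLIPProcessor are imported from transformers earlier in app.py, outside this diff. A hypothetical smoke test of the swap, not part of the commit:

```python
# Illustrative only: confirm the swapped tokenizer/encoder accept a >77-token prompt.
long_prompt = "HST style autochrome photo of a crowded city street, " * 20
tokens = pipe.tokenizer(
    long_prompt,
    padding="max_length",
    max_length=248,
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    hidden = pipe.text_encoder(tokens.input_ids)[0]
print(hidden.shape)  # expected (1, 248, 768) for a ViT-L/14 text tower
```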
@@ -85,9 +95,10 @@ class calculateDuration:
 
 def update_selection(evt: gr.SelectData, width, height):
     selected_lora = loras[evt.index]
-    new_placeholder = f"Type a prompt for {selected_lora['title']}"
+    new_placeholder = f"Prompt with activator word(s): '{selected_lora['trigger_word']}'! "
     lora_repo = selected_lora["repo"]
-    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✨"
+    lora_trigger = selected_lora['trigger_word']
+    updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}). Prompt using: '{lora_trigger}'!"
     if "aspect" in selected_lora:
         if selected_lora["aspect"] == "portrait":
             width = 768
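The handler now surfaces each adapter's trigger word in both the textbox placeholder and the markdown status line, instead of only the LoRA title. For orientation, a handler taking gr.SelectData as its first argument is attached to a Gradio gallery roughly like this (hypothetical component names; the real wiring sits elsewhere in app.py):

```python
# Illustrative wiring only; `gallery`, `prompt`, `selection_info` are assumed names.
gallery.select(
    update_selection,
    inputs=[width, height],
    outputs=[prompt, selection_info, selected_index, width, height],
)
```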
@@ -103,7 +114,7 @@ def update_selection(evt: gr.SelectData, width, height):
         height,
     )
 
-@spaces.GPU(duration=70)
+@spaces.GPU(duration=50)
 def infer(prompt, negative_prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress):
     pipe.to("cuda")
     generator = torch.Generator(device="cuda").manual_seed(seed)
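The per-call ZeroGPU budget drops from 70 s to 50 s. On a ZeroGPU Space, @spaces.GPU attaches a GPU only for the duration of the decorated call, and duration caps how long the call may hold the device; runs that exceed the cap can be aborted. A minimal sketch of the pattern (illustrative, assuming the Hugging Face spaces package):

```python
import spaces
import torch

@spaces.GPU(duration=50)  # GPU is attached only while this function runs
def gpu_call():
    x = torch.randn(2048, 2048, device="cuda")
    return (x @ x.T).norm().item()
```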
@@ -128,7 +139,7 @@ def run_lora(prompt, negative_prompt, cfg_scale, steps, selected_index, randomiz
 
     selected_lora = loras[selected_index]
     lora_path = selected_lora["repo"]
-    trigger_word = selected_lora["trigger_word"]
+    trigger_word = selected_lora['trigger_word']
 
     # Load LoRA weights
     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
@@ -158,17 +169,17 @@ css = '''
 '''
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     title = gr.HTML(
-        """<h1><img src="https://huggingface.co/AlekseyCalvin/StabledHSTorY_SD3.5_LoRA_V2_rank256/resolve/main/acs62v.png" alt="LoRA">Stabled LoRAs soon® on S.D.3.5 Merged</h1>""",
+        """<h1><img src="https://huggingface.co/AlekseyCalvin/StabledHSTorY_SD3.5_LoRA_V2_rank256/resolve/main/acs62v.png" alt="LoRA">Stabled LoRAs soon® on S.D.3.5L Merged</h1>""",
         elem_id="title",
     )
     # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob">SOON®'s curated LoRa Gallery & Art Manufactory.|Now testing HST-triggered historic photo-trained LoRAs for Stable Diffusion 3.5.</div>"""
+        """<div id="info_blob">SOON®'s curated Art Manufactory & Gallery of fine-tuned Low-Rank Adapter (LoRA) models for Stable Diffusion 3.5 Large (S.D.3.5L). Running on a base model variant averaging weights b/w slow S.D.3.5L & its turbo distillation.</div>"""
     )
 
     # Info blob stating what the app is running
     info_blob = gr.HTML(
-        """<div id="info_blob">Prephrase prompts w/: "HST style autochrome photo" </div>"""
+        """<div id="info_blob"> To reinforce/focus a selected adapter style, add its pre-encoded “trigger" word/phrase to your prompt. Corresponding activator info &/or prompt template appears once an adapter square is clicked. Copy/Paste these into prompt box as a starting point. </div>"""
     )
     selected_index = gr.State(None)
     with gr.Row():