forplaytvplus committed
Commit b608e99 · verified · 1 parent: 4dc3375

Update app.py

Files changed (1)
  1. app.py +4 -65
app.py CHANGED
@@ -59,7 +59,6 @@ def generate(
     guidance_scale_base: float = 5.0,
     num_inference_steps_base: int = 25,
     strength_img2img: float = 0.7,
-    is_sdxl: bool = False,
     use_lora: bool = False,
     use_lora2: bool = False,
     model = 'stabilityai/stable-diffusion-xl-base-1.0',
@@ -117,14 +116,6 @@ def generate(
     # It's SDXL 1.0 (NOT SD 1.5)
     if is_sdxl:
         pipe.enable_model_cpu_offload()
-        compel = Compel(
-            tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
-            text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
-            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
-            requires_pooled=[False, True],
-            truncate_long_prompts=False
-        )
-        conditioning, pooled = compel(prompt)
         generator = torch.Generator().manual_seed(seed)
 
         if not use_negative_prompt:
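
The block deleted here was the Compel prompt-weighting path for SDXL: embeddings built from both text encoders (penultimate hidden states, with a pooled output required from the second encoder only), supporting weighted prompts that can run past CLIP's 77-token window. Below is a minimal sketch of that removed path, reassembled from the deleted lines above; the checkpoint ID and prompt are placeholders, not the Space's exact setup.

```python
import torch
from compel import Compel, ReturnedEmbeddingsType
from diffusers import StableDiffusionXLPipeline

# Placeholder checkpoint; it happens to match the Space's default model ID.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],            # SDXL has two text encoders
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],                           # pooled embedding from encoder 2 only
    truncate_long_prompts=False,                             # keep tokens past the 77-token window
)
conditioning, pooled = compel("a misty forest++ at dawn")    # '++' is Compel's up-weighting syntax
image = pipe(prompt_embeds=conditioning, pooled_prompt_embeds=pooled).images[0]
```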
@@ -133,8 +124,8 @@ def generate(
         with pipeline_lock:
             if use_img2img:
                 result = pipe(
-                    prompt_embeds=conditioning,
-                    pooled_prompt_embeds=pooled,
+                    prompt=prompt,
+                    negative_prompt=negative_prompt,
                     image=init_image,
                     strength=strength_img2img,
                     negative_prompt=negative_prompt,
@@ -146,8 +137,7 @@ def generate(
                 ).images[0]
             else:
                 result = pipe(
-                    prompt_embeds=conditioning,
-                    pooled_prompt_embeds=pooled,
+                    prompt=prompt,
                     negative_prompt=negative_prompt,
                     width=width,
                     height=height,
@@ -157,49 +147,7 @@ def generate(
                 ).images[0]
 
         # Memory cleanup
-        del pipe, conditioning, pooled
-        torch.cuda.empty_cache()
-        gc.collect()
-        return result
-
-    # NOT SDXL (it's SD 1.5 instead)
-    if not is_sdxl:
-        pipe.enable_model_cpu_offload()
-        compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder, truncate_long_prompts=False)
-        conditioning = compel.build_conditioning_tensor(prompt)
-        negative_conditioning = compel.build_conditioning_tensor(negative_prompt)
-        [conditioning, negative_conditioning] = compel.pad_conditioning_tensors_to_same_length([conditioning, negative_conditioning])
-        generator = torch.Generator().manual_seed(seed)
-
-        if not use_negative_prompt:
-            negative_prompt = None  # type: ignore
-
-        with pipeline_lock:
-            if use_img2img:
-                result = pipe(
-                    prompt_embeds=conditioning,
-                    image=init_image,
-                    strength=strength_img2img,
-                    negative_prompt_embeds=negative_conditioning,
-                    width=width,
-                    height=height,
-                    guidance_scale=guidance_scale_base,
-                    num_inference_steps=num_inference_steps_base,
-                    generator=generator,
-                ).images[0]
-            else:
-                result = pipe(
-                    prompt_embeds=conditioning,
-                    negative_prompt_embeds=negative_conditioning,
-                    width=width,
-                    height=height,
-                    guidance_scale=guidance_scale_base,
-                    num_inference_steps=num_inference_steps_base,
-                    generator=generator,
-                ).images[0]
-
-        # Memory cleanup
-        del pipe, conditioning, negative_conditioning
+        del pipe
         torch.cuda.empty_cache()
         gc.collect()
         return result
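
From here on, generate() hands the pipeline plain strings and leaves text encoding to diffusers, so Compel's weighting and long-prompt handling are gone (standard CLIP encoding truncates prompts at 77 tokens). One caveat in the committed img2img branch: the added negative_prompt line sits above a surviving negative_prompt context line, so that call appears to pass the keyword twice, which Python rejects as a repeated keyword argument. A minimal sketch of the intended simplified call, passing it once; the prompt and sizes are placeholders, and the guidance and step values are generate()'s defaults:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

generator = torch.Generator().manual_seed(0)
result = pipe(
    prompt="a watercolor lighthouse",        # plain string: CLIP truncates at 77 tokens
    negative_prompt="blurry, low quality",   # passed exactly once
    width=1024,
    height=1024,
    guidance_scale=5.0,                      # guidance_scale_base default
    num_inference_steps=25,                  # num_inference_steps_base default
    generator=generator,
).images[0]
```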
@@ -243,7 +191,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
             use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
-            is_sdxl = gr.Checkbox(label='Is SDXL?', value=False)
             use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
             use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
@@ -326,13 +273,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         queue=False,
         api_name=False,
     )
-    is_sdxl.change(
-        fn=lambda x: gr.update(visible=x),
-        inputs=is_sdxl,
-        outputs=is_sdxl,
-        queue=False,
-        api_name=False,
-    )
     use_img2img.change(
         fn=lambda x: gr.update(visible=x),
         inputs=use_img2img,
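
The deleted handler followed the same visibility-toggle pattern as the surrounding .change() wiring, though it pointed the checkbox at itself (both inputs and outputs are is_sdxl); once the checkbox is gone from the accordion, its handler must go too. A self-contained sketch of the surviving pattern, using a hypothetical init_image target rather than the self-reference:

```python
import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical pair: the checkbox shows/hides a related control.
    use_img2img = gr.Checkbox(label="Use Img2Img", value=False)
    init_image = gr.Image(label="Init image", visible=False)
    use_img2img.change(
        fn=lambda x: gr.update(visible=x),  # same lambda as the kept handlers
        inputs=use_img2img,
        outputs=init_image,
        queue=False,
        api_name=False,
    )
```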
@@ -364,7 +304,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
             guidance_scale_base,
             num_inference_steps_base,
             strength_img2img,
-            is_sdxl,
             use_lora,
             use_lora2,
             model,
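
This final hunk is required because Gradio passes the handler's inputs list to generate() positionally; dropping is_sdxl from the function signature without also dropping it here would shift every later argument by one. A self-contained toy of that pairing, with all names hypothetical:

```python
import gradio as gr

def generate(prompt: str, strength_img2img: float, use_lora: bool) -> str:
    # Stand-in for the Space's generate(); just echoes its positional inputs.
    return f"prompt={prompt!r}, strength={strength_img2img}, lora={use_lora}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    strength_img2img = gr.Slider(0.0, 1.0, value=0.7, label="Strength")
    use_lora = gr.Checkbox(label="Use Lora 1", value=False)
    out = gr.Textbox(label="Result")
    # The list below must mirror generate()'s parameter order, one-for-one.
    gr.Button("Run").click(fn=generate,
                           inputs=[prompt, strength_img2img, use_lora],
                           outputs=out)
```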