Update app.py
app.py CHANGED
@@ -156,7 +156,7 @@ def generate(segment, image, prompt, size, guidance_scale, num_inference_steps,
     prompt_attention_mask = state["prompt_attention_mask"].to("cuda", dtype=torch.bfloat16)
     image_latents = state["image_latents"].to("cuda", dtype=torch.bfloat16)
     if segment==9:
-        pipe.
+        pipe.transformer.to('cpu')
         torch.cuda.empty_cache()
         pipe.vae.to("cuda")
         latents = latents.to(pipe.vae.dtype) / pipe.vae.config.scaling_factor
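
The removed line was an incomplete statement (`pipe.`), which would have raised a SyntaxError; the replacement completes it by offloading the transformer to CPU before the VAE decode on the final segment, so the transformer's VRAM can be reclaimed for decoding. Below is a minimal sketch of that offload-before-decode pattern, assuming a Diffusers-style pipeline with `transformer` and `vae` submodules; the function name `decode_on_gpu` and its arguments are illustrative, not part of the app.

import torch

def decode_on_gpu(pipe, latents):
    # Move the large transformer off the GPU so its memory can be freed
    # before the VAE decode step.
    pipe.transformer.to("cpu")
    torch.cuda.empty_cache()

    # Bring the VAE onto the GPU, then match dtype and undo latent scaling
    # as in the diff above.
    pipe.vae.to("cuda")
    latents = latents.to("cuda", dtype=pipe.vae.dtype) / pipe.vae.config.scaling_factor

    with torch.no_grad():
        image = pipe.vae.decode(latents, return_dict=False)[0]
    return image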