Update custom_pipeline.py
custom_pipeline.py: CHANGED (+3, -7)
@@ -47,10 +47,6 @@ class FluxWithCFGPipeline(FluxPipeline):
     Extends the FluxPipeline to yield intermediate images during the denoising process
     with progressively increasing resolution for faster generation.
     """
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.default_sample_size = 512  # Default sample size from the first pipeline
-
     @torch.inference_mode()
     def generate_images(
         self,
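With the `__init__` override removed, the pipeline now inherits `default_sample_size` from the stock `FluxPipeline` instead of forcing 512. In diffusers-style pipelines the default output resolution is derived from that attribute; a minimal sketch of the usual computation (the 128 and 8 values are assumptions based on recent diffusers releases, not something this diff shows):

    # Sketch: how Flux-style pipelines typically resolve default height/width.
    # default_sample_size and vae_scale_factor are set in FluxPipeline.__init__;
    # the concrete values below are assumptions, not taken from this file.
    height = height or self.default_sample_size * self.vae_scale_factor  # e.g. 128 * 8 = 1024
    width = width or self.default_sample_size * self.vae_scale_factor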
@@ -106,7 +102,6 @@ class FluxWithCFGPipeline(FluxPipeline):
             max_sequence_length=max_sequence_length,
             lora_scale=lora_scale,
         )
-
         # 4. Prepare latent variables
         num_channels_latents = self.transformer.config.in_channels // 4
         latents, latent_image_ids = self.prepare_latents(
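For context on `in_channels // 4`: the Flux transformer consumes latents packed into 2x2 spatial patches, so its input channel count is four times the VAE's latent channel count. A rough shape sketch (the concrete 64/16/8 values are assumptions from the public Flux configs, not part of this diff):

    in_channels = 64                          # assumed Flux transformer input width
    num_channels_latents = in_channels // 4   # 16, the VAE's latent channels
    # 1024x1024 image, 8x VAE downscale, 2x2 packing:
    image_seq_len = (1024 // 8 // 2) ** 2     # 4096 packed latent tokens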
@@ -119,7 +114,6 @@ class FluxWithCFGPipeline(FluxPipeline):
             generator,
             latents,
         )
-
         # 5. Prepare timesteps
         sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
         image_seq_len = latents.shape[1]
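The schedule here is a plain linear ramp in sigma from 1.0 down to 1/num_inference_steps; `image_seq_len` is what the stock FluxPipeline feeds into its resolution-dependent timestep shift for the flow-matching scheduler. The ramp itself is easy to verify:

    import numpy as np

    num_inference_steps = 4  # schnell-style low step count
    sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
    print(sigmas)  # [1.   0.75 0.5  0.25]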
@@ -156,12 +150,14 @@ class FluxWithCFGPipeline(FluxPipeline):
                 return_dict=False,
             )[0]
 
-
+            # Yield intermediate result
             latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
             torch.cuda.empty_cache()
 
         # Final image
         return self._decode_latents_to_image(latents, height, width, output_type)
+        self.maybe_free_model_hooks()
+        torch.cuda.empty_cache()
 
     def _decode_latents_to_image(self, latents, height, width, output_type, vae=None):
         """Decodes the given latents into an image."""
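Two reading notes on this last hunk. First, as rendered, the added `self.maybe_free_model_hooks()` and `torch.cuda.empty_cache()` calls sit after the `return`, where a plain Python function never reaches them; presumably the cleanup is meant to run before the final result is handed back, which would require decoding into a variable first. Second, the body of `_decode_latents_to_image` is not shown in the diff; based on the decode path in diffusers' stock `FluxPipeline`, it plausibly looks like the sketch below (the unpack and scaling details are assumptions, not taken from this file):

    def _decode_latents_to_image(self, latents, height, width, output_type, vae=None):
        """Decodes the given latents into an image."""
        vae = vae or self.vae  # allow decoding with an alternate (e.g. lighter) VAE
        latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
        latents = latents / vae.config.scaling_factor + vae.config.shift_factor
        image = vae.decode(latents, return_dict=False)[0]
        return self.image_processor.postprocess(image, output_type=output_type)[0]

With a decode path like this, `generate_images(..., output_type="pil")` would return a PIL image directly, which matches how a Space UI would display the result.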