Spaces: runtime error (the error notice appears twice in the original page capture)
Commit: "Update app.py" — Browse files
File changed: app.py
@@ -51,9 +51,9 @@ global pipe
(before the change — removed lines are marked "-")
 51   pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.bfloat16)
 52   pipe.to("cuda")
 53   # Enable memory savings
 54 - pipe.vae.enable_slicing()
 55 - pipe.vae.enable_tiling()
 56 - pipe.enable_model_cpu_offload()
 57
 58   with torch.no_grad(): # enable image inputs
 59       initial_input_channels = pipe.transformer.config.in_channels
@@ -111,7 +111,8 @@ def resize_image_to_bucket(image: Union[Image.Image, np.ndarray], bucket_reso: T…
(before the change — removed lines are marked "-")
 111      image = image[crop_top:crop_top + bucket_height, crop_left:crop_left + bucket_width]
 112      return image
 113
 114 - @spaces.GPU(duration=120)
 115  def generate_video(prompt: str, frame1: Image.Image, frame2: Image.Image, resolution: str, guidance_scale: float, num_frames: int, num_inference_steps: int) -> bytes:
 116      # Debugging print statements
 117      print(f"Frame 1 Type: {type(frame1)}")
|
|
(after the change — added lines are marked "+")
 51   pipe = HunyuanVideoPipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=torch.bfloat16)
 52   pipe.to("cuda")
 53   # Enable memory savings
 54 + # pipe.vae.enable_slicing()
 55 + # pipe.vae.enable_tiling()
 56 + # pipe.enable_model_cpu_offload()
 57
 58   with torch.no_grad(): # enable image inputs
 59       initial_input_channels = pipe.transformer.config.in_channels
|
|
(after the change — added lines are marked "+")
 111      image = image[crop_top:crop_top + bucket_height, crop_left:crop_left + bucket_width]
 112      return image
 113
 114 + # @spaces.GPU(duration=120)
 115 + @torch.inference_mode()
 116  def generate_video(prompt: str, frame1: Image.Image, frame2: Image.Image, resolution: str, guidance_scale: float, num_frames: int, num_inference_steps: int) -> bytes:
 117      # Debugging print statements
 118      print(f"Frame 1 Type: {type(frame1)}")