Spaces: Running on Zero
Update app.py
Browse files
app.py
CHANGED
@@ -52,7 +52,7 @@ def generate(prompt,
|
|
52 |
num_inference_steps=steps,
|
53 |
decode_timestep = 0.05,
|
54 |
decode_noise_scale = 0.025,
|
55 |
-
generator=torch.Generator().manual_seed(seed),
|
56 |
#output_type="latent",
|
57 |
).frames
|
58 |
else:
|
@@ -65,7 +65,7 @@ def generate(prompt,
|
|
65 |
num_inference_steps=steps,
|
66 |
decode_timestep = 0.05,
|
67 |
decode_noise_scale = 0.025,
|
68 |
-
generator=torch.Generator().manual_seed(seed),
|
69 |
#output_type="latent",
|
70 |
).frames
|
71 |
|
@@ -114,8 +114,9 @@ def generate(prompt,
|
|
114 |
# ).frames[0]
|
115 |
|
116 |
# Part 4. Downscale the video to the expected resolution
|
|
|
117 |
video = [frame.resize((expected_width, expected_height)) for frame in latents[0]]
|
118 |
-
export_to_video(
|
119 |
return "output.mp4"
|
120 |
|
121 |
|
|
|
52 |
num_inference_steps=steps,
|
53 |
decode_timestep = 0.05,
|
54 |
decode_noise_scale = 0.025,
|
55 |
+
generator=torch.Generator(device="cuda").manual_seed(seed),
|
56 |
#output_type="latent",
|
57 |
).frames
|
58 |
else:
|
|
|
65 |
num_inference_steps=steps,
|
66 |
decode_timestep = 0.05,
|
67 |
decode_noise_scale = 0.025,
|
68 |
+
generator=torch.Generator(device="cuda").manual_seed(seed),
|
69 |
#output_type="latent",
|
70 |
).frames
|
71 |
|
|
|
114 |
# ).frames[0]
|
115 |
|
116 |
# Part 4. Downscale the video to the expected resolution
|
117 |
+
#video = [frame.resize((expected_width, expected_height)) for frame in video]
|
118 |
video = [frame.resize((expected_width, expected_height)) for frame in latents[0]]
|
119 |
+
export_to_video(video, "output.mp4", fps=24)
|
120 |
return "output.mp4"
|
121 |
|
122 |
|