# Source: Hugging Face Space app.py — author andrewchwalik, commit 1004b05 (verified)
import os
from diffusers import StableDiffusionPipeline
import ffmpeg
import gradio as gr
# Retrieve the Hugging Face access token from the Space's secrets.
# NOTE(review): never print the raw token — it is a credential. Log only
# whether it is configured.
token = os.getenv("HUGGINGFACE_TOKEN")
print(f"Hugging Face token configured: {token is not None}")

# Load the Stable Diffusion image pipeline. The token is forwarded so
# gated/private checkpoints also work; it is ignored for public models.
model = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    token=token,
)
model.to("cpu")  # Use CPU since GPU is not available
def generate_video(prompt):
    """Generate a short MP4 video from a text prompt.

    Renders a handful of Stable Diffusion frames conditioned on *prompt*,
    stitches them into a video with ffmpeg, and returns the video path.
    Intermediate frame images are removed even if encoding fails.

    Args:
        prompt: Text description used to condition each generated frame.

    Returns:
        Path to the generated video file ("output.mp4").
    """
    num_frames = 5  # Small count keeps CPU-only generation time tolerable.
    frames = []
    output_video = "output.mp4"
    try:
        # Each pipeline call re-samples, so frames differ even for a
        # fixed prompt, giving the video some motion.
        for i in range(num_frames):
            image = model(prompt).images[0]
            frame_path = f"frame_{i}.png"
            image.save(frame_path)
            frames.append(frame_path)

        # Combine the numbered frames into a video at 1 frame/second.
        (
            ffmpeg
            .input("frame_%d.png", framerate=1)
            .output(output_video)
            .run(overwrite_output=True)
        )
    finally:
        # Clean up frames even when generation or encoding raised
        # part-way through (the original leaked PNGs on failure).
        for frame in frames:
            if os.path.exists(frame):
                os.remove(frame)
    return output_video  # Path to the generated video
# Gradio interface: a prompt textbox, a generate button, and a file
# component for downloading the resulting MP4.
with gr.Blocks() as demo:
    gr.Markdown("# AI Video Generator")
    prompt_input = gr.Textbox(label="Enter your video prompt", placeholder="Type something creative...")
    video_output = gr.File(label="Download Your Video")
    generate_button = gr.Button("Generate Video")
    # Clicking the button runs generate_video(prompt) and exposes the
    # returned file path for download.
    generate_button.click(fn=generate_video, inputs=prompt_input, outputs=video_output)

demo.launch()