|
import os |
|
from diffusers import StableDiffusionPipeline |
|
import ffmpeg |
|
import gradio as gr |
|
|
|
|
|
# Read the Hugging Face access token from the environment. May be None for
# public models such as runwayml/stable-diffusion-v1-5.
token = os.getenv("HUGGINGFACE_TOKEN")

# Never print the raw token -- it is a secret and would end up in logs.
# Report only whether one was found.
print(f"Hugging Face token {'found' if token else 'not set'} in environment")

# Load the Stable Diffusion pipeline, forwarding the token so that gated or
# private model repositories can be fetched when a token is provided.
model = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    token=token,
)
model.to("cpu")  # CPU inference: slow, but works without a GPU
|
|
|
def generate_video(prompt, num_frames=5):
    """Generate a short MP4 video from a text prompt.

    Runs the Stable Diffusion pipeline once per frame, writes each frame to
    disk as ``frame_<i>.png``, stitches the frames into a video with ffmpeg
    at 1 frame per second, and removes the intermediate frame files.

    Args:
        prompt: Text prompt passed to the diffusion model for every frame.
        num_frames: Number of frames to generate (default 5, matching the
            original behavior).

    Returns:
        Path of the generated video file ("output.mp4").
    """
    frames = []
    try:
        for i in range(num_frames):
            # Each call re-samples the model, so frames differ even for the
            # same prompt.
            image = model(prompt).images[0]
            frame_path = f"frame_{i}.png"
            image.save(frame_path)
            frames.append(frame_path)

        output_video = "output.mp4"
        (
            ffmpeg
            .input("frame_%d.png", framerate=1)
            .output(output_video)
            .run(overwrite_output=True)
        )
        return output_video
    finally:
        # Clean up intermediate frames even if generation or encoding
        # failed partway through (the original leaked them on error).
        for frame in frames:
            os.remove(frame)
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# AI Video Generator")

    # Input / output widgets.
    prompt_box = gr.Textbox(label="Enter your video prompt", placeholder="Type something creative...")
    video_file = gr.File(label="Download Your Video")
    run_button = gr.Button("Generate Video")

    # Wire the button to the video-generation function.
    run_button.click(fn=generate_video, inputs=prompt_box, outputs=video_file)

demo.launch()