File size: 1,422 Bytes
32ab995
0f2dddc
 
6238b1d
0f2dddc
32ab995
 
 
0ee0afa
 
32ab995
d4f70fe
32ab995
1004b05
e2436ee
 
ef1162a
0f2dddc
6238b1d
0f2dddc
 
 
 
 
ef1162a
0f2dddc
 
 
6238b1d
0f2dddc
 
 
e2436ee
6238b1d
0f2dddc
 
e2436ee
6238b1d
ef1162a
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import os
from diffusers import StableDiffusionPipeline
import ffmpeg
import gradio as gr

# Retrieve the token from Hugging Face secrets.
token = os.getenv("HUGGINGFACE_TOKEN")

# SECURITY: never print the token itself — the original code echoed the
# secret into logs/console. Only report whether it is configured.
print(f"Hugging Face token configured: {token is not None}")

# Pass the token through so gated/private model access actually works;
# token=None falls back to anonymous access, matching the old behavior.
# NOTE(review): assumes a diffusers version that accepts the `token`
# kwarg — confirm against the pinned dependency.
model = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    token=token,
)
model.to("cpu")  # Use CPU since GPU is not available

def generate_video(prompt, num_frames=5):
    """Generate a short MP4 video from a text prompt.

    Runs the Stable Diffusion pipeline once per frame, writes each frame
    to disk as ``frame_<i>.png``, stitches them into a video with ffmpeg,
    and removes the temporary frame files afterwards.

    Args:
        prompt: Text description passed to the diffusion model.
        num_frames: Number of frames to generate (default 5; at the
            current 1 fps framerate this is also the video length in
            seconds).

    Returns:
        Path to the generated video file ("output.mp4").
    """
    output_video = "output.mp4"
    frames = []
    try:
        # Generate one image per frame; each pipeline call is slow on CPU.
        for i in range(num_frames):
            image = model(prompt).images[0]
            frame_path = f"frame_{i}.png"
            image.save(frame_path)
            frames.append(frame_path)

        # Combine frames into a video (pattern must match the names above).
        (
            ffmpeg
            .input("frame_%d.png", framerate=1)  # Adjust framerate
            .output(output_video)
            .run(overwrite_output=True)
        )
    finally:
        # Clean up frame files even if inference or encoding raised,
        # so failed runs don't leave PNGs behind.
        for frame in frames:
            if os.path.exists(frame):
                os.remove(frame)

    return output_video  # Path to the generated video

# --- Gradio UI: prompt in, downloadable video out ---
with gr.Blocks() as demo:
    gr.Markdown("# AI Video Generator")
    prompt_box = gr.Textbox(
        label="Enter your video prompt",
        placeholder="Type something creative...",
    )
    video_file = gr.File(label="Download Your Video")
    run_button = gr.Button("Generate Video")

    # Clicking the button feeds the prompt to the generator and returns
    # the resulting video path to the file component.
    run_button.click(fn=generate_video, inputs=prompt_box, outputs=video_file)

demo.launch()