arshadrana committed on
Commit bea0942 · verified · 1 Parent(s): 58d11ab

Update app.py

Files changed (1)
  1. app.py +12 -48
app.py CHANGED
@@ -1,58 +1,22 @@
 from diffusers import DiffusionPipeline
 import gradio as gr
-import moviepy.editor as mpy
-import os

-# Load the pipeline
+# Load the DiffusionPipeline
 pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid")

-def generate_video(prompt, num_frames=10, duration=5, audio_path=None):
-    # Generate images based on the prompt
-    frames = [pipe(prompt).images[0] for _ in range(num_frames)]
+# Define the function for Gradio
+def generate_image(prompt):
+    image = pipe(prompt).images[0]
+    return image

-    # Save frames to create a video
-    frame_files = []
-    for i, img in enumerate(frames):
-        file_path = f"frame_{i}.png"
-        img.save(file_path)
-        frame_files.append(file_path)
-
-    # Create video from frames
-    clip = mpy.ImageSequenceClip(frame_files, fps=num_frames / duration)
-
-    # Add audio if provided
-    if audio_path and os.path.exists(audio_path):
-        audio = mpy.AudioFileClip(audio_path)
-        clip = clip.set_audio(audio.set_duration(clip.duration))
-
-    # Save the video file
-    video_path = "generated_video.mp4"
-    clip.write_videofile(video_path, codec="libx264")
-
-    # Clean up frame files
-    for file_path in frame_files:
-        os.remove(file_path)
-
-    return video_path
-
-# Gradio interface
-def gradio_interface(prompt, num_frames, duration, audio_file):
-    video_path = generate_video(prompt, num_frames, duration, audio_file.name if audio_file else None)
-    return video_path
-
-# Gradio setup
+# Set up Gradio interface
 interface = gr.Interface(
-    fn=gradio_interface,
-    inputs=[
-        gr.inputs.Textbox(label="Prompt"),
-        gr.inputs.Slider(minimum=5, maximum=50, step=1, default=10, label="Number of Frames"),
-        gr.inputs.Slider(minimum=1, maximum=15, step=1, default=5, label="Duration (seconds)"),
-        gr.inputs.File(label="Audio File (Optional)"),
-    ],
-    outputs="video",
-    title="Generate Video from Text Prompt with Audio",
-    description="Generate a video based on a text prompt, with optional background music."
+    fn=generate_image,
+    inputs="text",
+    outputs="image",
+    title="Stable Video Diffusion Image Generator",
+    description="Generate an image based on your prompt using Stable Diffusion."
 )

-# Launch Gradio app
+# Launch the Gradio app
 interface.launch()
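
For reference, a minimal sketch of calling an app like the updated one remotely with gradio_client; the Space id below is a placeholder assumption, and the "/predict" endpoint name assumes the default exposed by a single gr.Interface.

from gradio_client import Client

# Placeholder Space id; replace with the actual deployment (assumption).
client = Client("arshadrana/text-to-image-demo")

# Send a text prompt to the interface's default endpoint and print the
# path of the image file the app returns.
result = client.predict("an astronaut riding a horse", api_name="/predict")
print(result)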