mrcuddle committed
Commit d03a679 · verified · 1 Parent(s): fc982fe

Update app.py

Files changed (1):
  app.py  +29 -18
app.py CHANGED
@@ -3,15 +3,20 @@ import torch
 from diffusers import I2VGenXLPipeline
 from diffusers.utils import export_to_gif, load_image
 import tempfile
+import spaces
 
+# Function to initialize the pipeline with CUDA support
+@spaces.GPU
 def initialize_pipeline():
+    # Check if CUDA is available and set the device
     device = "cuda" if torch.cuda.is_available() else "cpu"
 
-    # Initialize the pipeline with CUDA support
-    pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
-    pipeline.to(device)
+    # Initialize the pipeline with CUDA support
+    pipeline = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
+    pipeline.to(device)
+    return pipeline, device
 
-def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
+def generate_gif(pipeline, device, prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed):
     # Set the generator seed
     generator = torch.Generator(device=device).manual_seed(seed)
 
@@ -43,22 +48,28 @@ def generate_gif(prompt, image, negative_prompt, num_inference_steps, guidance_s
     return gif_path
 
 # Create the Gradio interface with tabs
-with gr.Tabs() as demo:
+with gr.Blocks() as demo:
+    pipeline, device = initialize_pipeline()
+
     with gr.TabItem("Generate from Text or Image"):
-        interface = gr.Interface(
+        with gr.Row():
+            with gr.Column():
+                prompt = gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt")
+                image = gr.Image(type="filepath", label="Input Image (optional)")
+                negative_prompt = gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt")
+                num_inference_steps = gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps")
+                guidance_scale = gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale")
+                seed = gr.Number(label="Seed", value=8888)
+                generate_button = gr.Button("Generate GIF")
+
+            with gr.Column():
+                output_video = gr.Video(label="Generated GIF")
+
+        generate_button.click(
             fn=generate_gif,
-            inputs=[
-                gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="Prompt"),
-                gr.Image(type="filepath", label="Input Image (optional)"),
-                gr.Textbox(lines=2, placeholder="Enter your negative prompt here...", label="Negative Prompt"),
-                gr.Slider(1, 100, step=1, value=50, label="Number of Inference Steps"),
-                gr.Slider(1, 20, step=0.1, value=9.0, label="Guidance Scale"),
-                gr.Number(label="Seed", value=8888)
-            ],
-            outputs=gr.Video(label="Generated GIF"),
-            title="I2VGen-XL GIF Generator",
-            description="Generate a GIF from a text prompt and/or an image using the I2VGen-XL model."
+            inputs=[pipeline, device, prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed],
+            outputs=output_video
         )
 
 # Launch the interface
-demo.launch()
+demo.launch()
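
Side note on the @spaces.GPU decorator added above: on ZeroGPU Spaces it requests a GPU only for the duration of each call to the decorated function, so it is usually placed on the function that runs inference, not only on the one that loads the model. A minimal sketch of that pattern, assuming the standard diffusers I2VGen-XL call signature (run_inference is a hypothetical helper, not part of this commit):

import spaces
import torch

@spaces.GPU
def run_inference(pipeline, image, prompt, negative_prompt, steps, guidance, seed):
    # On a ZeroGPU Space, a GPU is attached only while this decorated call runs
    generator = torch.Generator(device="cuda").manual_seed(int(seed))
    frames = pipeline(
        prompt=prompt,
        image=image,
        negative_prompt=negative_prompt,
        num_inference_steps=int(steps),
        guidance_scale=float(guidance),
        generator=generator,
    ).frames[0]
    return frames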
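
Also worth noting: generate_button.click treats every entry of inputs as a Gradio component, so passing the pipeline object and device string there may not behave as intended. One common alternative is to pre-bind those two values with functools.partial and keep only UI components in inputs. A hedged sketch reusing the names from this file, placed inside the same with gr.Blocks() block (this wiring is an assumption, not what the commit does):

import functools

pipeline, device = initialize_pipeline()

# Pre-bind the non-component arguments; Gradio supplies only the UI-driven values.
bound_generate = functools.partial(generate_gif, pipeline, device)

generate_button.click(
    fn=bound_generate,
    inputs=[prompt, image, negative_prompt, num_inference_steps, guidance_scale, seed],
    outputs=output_video,
)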