Commit · b105b21
1 Parent(s): 7977eff
Update base/app.py
Files changed: base/app.py (+1 -18)
base/app.py CHANGED

@@ -21,12 +21,6 @@ config_path = "./base/configs/sample.yaml"
 args = OmegaConf.load("./base/configs/sample.yaml")
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# ------- get model ---------------
-# model_t2V = model_t2v_fun(args)
-# model_t2V.to(device)
-# if device == "cuda":
-#     model_t2V.enable_xformers_memory_efficient_attention()
-
 css = """
 h1 {
   text-align: center;
@@ -77,7 +71,6 @@ def infer(prompt, seed_inp, ddim_steps,cfg, infer_type):
     if device == "cuda":
         model.enable_xformers_memory_efficient_attention()
     videos = model(prompt, video_length=16, height = 320, width= 512, num_inference_steps=ddim_steps, guidance_scale=cfg).video
-    print(videos[0].shape)
     if not os.path.exists(args.output_folder):
         os.mkdir(args.output_folder)
     torchvision.io.write_video(args.output_folder + prompt[0:30].replace(' ', '_') + '-'+str(seed_inp)+'-'+str(ddim_steps)+'-'+str(cfg)+ '-.mp4', videos[0], fps=8)
@@ -86,9 +79,6 @@ def infer(prompt, seed_inp, ddim_steps,cfg, infer_type):
     return args.output_folder + prompt[0:30].replace(' ', '_') + '-'+str(seed_inp)+'-'+str(ddim_steps)+'-'+str(cfg)+ '-.mp4'
 
 
-# def clean():
-#     return gr.Video.update(value=None)
-
 title = """
 <div style="text-align: center; max-width: 700px; margin: 0 auto;">
     <div
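One observation before the layout hunks: after this change, the output path that infer() hands to torchvision.io.write_video and the path it returns to the UI are still the same expression written out twice. A minimal sketch of that shared logic factored into one helper (build_output_path is a hypothetical name, not part of the commit):

def build_output_path(output_folder, prompt, seed_inp, ddim_steps, cfg):
    # Mirrors the filename scheme in infer(): the first 30 characters of the
    # prompt with spaces replaced by underscores, then the seed, the step
    # count and the guidance scale. output_folder is concatenated directly,
    # so it is assumed to end with a path separator, as in the original code.
    name = prompt[0:30].replace(' ', '_')
    return output_folder + name + '-' + str(seed_inp) + '-' + str(ddim_steps) + '-' + str(cfg) + '-.mp4'

The remaining hunks trim the Gradio layout itself: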
@@ -118,8 +108,6 @@ with gr.Blocks(css='style.css') as demo:
     )
     with gr.Column():
         with gr.Row(elem_id="col-container"):
-            # inputs = [prompt, seed_inp, ddim_steps]
-            # outputs = [video_out]
             with gr.Column():
 
                 prompt = gr.Textbox(value="a corgi walking in the park at sunrise, oil painting style", label="Prompt", placeholder="enter prompt", show_label=True, elem_id="prompt-in", min_width=200, lines=2)
@@ -127,12 +115,9 @@ with gr.Blocks(css='style.css') as demo:
                 ddim_steps = gr.Slider(label='Steps', minimum=50, maximum=300, value=50, step=1)
                 seed_inp = gr.Slider(value=-1,label="seed (for random generation, use -1)",show_label=True,minimum=-1,maximum=2147483647)
                 cfg = gr.Number(label="guidance_scale",value=7.5)
-                # seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=400, elem_id="seed-in")
-
 
             with gr.Column():
                 submit_btn = gr.Button("Generate video")
-                # clean_btn = gr.Button("Clean video")
                 video_out = gr.Video(label="Video result", elem_id="video-output")
 
     inputs = [prompt, seed_inp, ddim_steps, cfg, infer_type]
@@ -157,10 +142,8 @@ with gr.Blocks(css='style.css') as demo:
         cache_examples=False,
     )
     ex.dataset.headers = [""]
-
-    # clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)
+
     submit_btn.click(infer, inputs, outputs)
-    # share_button.click(None, [], [], _js=share_js)
 
 demo.queue(max_size=12).launch()
 
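A note on the dead code this commit removes: the commented-out clean() helper and the "Clean video" button were two halves of one feature, a button that blanks the output player. If it were ever restored, the wiring would look roughly like the sketch below, which reuses the deleted lines (it assumes Gradio 3.x, where component .update() methods exist; in Gradio 4 the idiom changed to returning a fresh component such as gr.Video(value=None)):

import gradio as gr

def clean():
    # Return an empty update so the output video player is cleared.
    return gr.Video.update(value=None)

with gr.Blocks() as demo:
    video_out = gr.Video(label="Video result")
    clean_btn = gr.Button("Clean video")
    clean_btn.click(clean, inputs=[], outputs=[video_out], queue=False)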
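Finally, the seed slider above treats -1 as "pick a random seed". The diff does not show how infer() resolves that sentinel, but a common pattern looks like the sketch below (resolve_seed is an assumed name, not taken from base/app.py; the 2147483647 bound matches the slider's maximum):

import random

import torch

def resolve_seed(seed_inp: int) -> int:
    # -1 means "draw a fresh random seed"; any other value is used as-is,
    # so a run can be reproduced by re-entering its seed.
    if seed_inp == -1:
        seed_inp = random.randint(0, 2147483647)
    torch.manual_seed(seed_inp)
    return seed_inp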