zejunyang committed
Commit 1061388 · 1 Parent(s): 0f77f4f

limit param

Files changed (1): app.py (+8 -8)
app.py CHANGED
@@ -98,7 +98,7 @@ vis = FaceMeshVisualizer()
 
 frame_inter_model = init_frame_interpolation_model()
 
-@spaces.GPU(duration=300)
+@spaces.GPU(duration=100)
 def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, length=60, seed=42):
     fps = 30
     cfg = 3.5
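
Context for the first change: on ZeroGPU Spaces, the duration argument of @spaces.GPU tells the scheduler how many seconds a call may hold the GPU, so lowering it from 300 to 100 shortens each request's reservation. A minimal sketch of the decorator's use (the function body below is an illustrative placeholder, not code from app.py):

import spaces
import torch

@spaces.GPU(duration=100)  # reserve a ZeroGPU slot for at most ~100 s per call
def run_inference(x: torch.Tensor) -> torch.Tensor:
    # Placeholder workload; the real app runs its diffusion pipeline here.
    return (x.cuda() * 2).cpu()
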
@@ -164,7 +164,7 @@ def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, l
     # [transforms.Resize((height, width)), transforms.ToTensor()]
     # )
     args_L = len(pose_images) if length==0 or length > len(pose_images) else length
-    args_L = min(args_L, 90)
+    args_L = min(args_L, 60)
     for pose_image_np in pose_images[: args_L : fi_step]:
         # pose_image_pil = Image.fromarray(cv2.cvtColor(pose_image_np, cv2.COLOR_BGR2RGB))
         # pose_tensor_list.append(pose_transform(pose_image_pil))
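
To see what the tighter cap does in practice, here is a hedged sketch of the slicing around the changed line (the helper name is illustrative; fi_step is 3 in app.py, and the video2video path below applies the same cap scaled by step):

def frames_processed(num_pose_images: int, length: int, fi_step: int = 3, cap: int = 60) -> int:
    # Mirrors the logic around the changed line: pick the requested length,
    # clamp it to the cap, then count frames visited by [: args_L : fi_step].
    args_L = num_pose_images if length == 0 or length > num_pose_images else length
    args_L = min(args_L, cap)
    return len(range(0, args_L, fi_step))

# A 300-frame pose sequence with length=0:
print(frames_processed(300, 0, cap=60))  # 20 keyframes under the new cap
print(frames_processed(300, 0, cap=90))  # 30 keyframes under the old cap
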
@@ -209,7 +209,7 @@ def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, l
 
     return save_path.replace('_noaudio.mp4', '.mp4'), ref_image_pil
 
-@spaces.GPU(duration=300)
+@spaces.GPU(duration=100)
 def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     cfg = 3.5
     fi_step = 3
@@ -257,7 +257,7 @@ def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     verts_list = []
     bs_list = []
     args_L = len(source_images) if length==0 or length*step > len(source_images) else length*step
-    args_L = min(args_L, 90*step)
+    args_L = min(args_L, 60*step)
     for src_image_pil in source_images[: args_L : step*fi_step]:
         src_img_np = cv2.cvtColor(np.array(src_image_pil), cv2.COLOR_RGB2BGR)
         frame_height, frame_width, _ = src_img_np.shape
@@ -379,11 +379,11 @@ with gr.Blocks() as demo:
     a2v_headpose_video = gr.Video(label="Option: upload head pose reference video", sources="upload")
 
     with gr.Row():
-        a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
+        a2v_size_slider = gr.Slider(minimum=256, maximum=512, step=8, value=512, label="Video size (-W & -H)")
         a2v_step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
 
     with gr.Row():
-        a2v_length = gr.Slider(minimum=0, maximum=90, step=1, value=30, label="Length (-L)")
+        a2v_length = gr.Slider(minimum=0, maximum=60, step=1, value=30, label="Length (-L)")
         a2v_seed = gr.Number(value=42, label="Seed (--seed)")
 
     a2v_botton = gr.Button("Generate", variant="primary")
@@ -407,11 +407,11 @@ with gr.Blocks() as demo:
     v2v_source_video = gr.Video(label="Upload source video", sources="upload")
 
     with gr.Row():
-        v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
+        v2v_size_slider = gr.Slider(minimum=256, maximum=512, step=8, value=512, label="Video size (-W & -H)")
         v2v_step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
 
     with gr.Row():
-        v2v_length = gr.Slider(minimum=0, maximum=90, step=1, value=30, label="Length (-L)")
+        v2v_length = gr.Slider(minimum=0, maximum=60, step=1, value=30, label="Length (-L)")
         v2v_seed = gr.Number(value=42, label="Seed (--seed)")
 
     v2v_botton = gr.Button("Generate", variant="primary")
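
Net effect of the eight changed lines: each generation call now reserves the GPU for at most 100 s instead of 300 s, output is capped at 60 driving frames instead of 90 (2 s at the fps = 30 set in audio2video), and both size sliders top out at 512 px instead of 1024, in the audio2video and video2video paths alike.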
 