zejunyang committed
Commit 6d3218f
1 Parent(s): 3e99418
Files changed (1):
  1. app.py +6 -6

app.py CHANGED
@@ -162,7 +162,7 @@ def audio2video(input_audio, ref_img, headpose_video=None, size=512, steps=25, l
     # [transforms.Resize((height, width)), transforms.ToTensor()]
     # )
     args_L = len(pose_images) if length==0 or length > len(pose_images) else length
-    args_L = min(args_L, 150)
+    args_L = min(args_L, 90)
     for pose_image_np in pose_images[: args_L : fi_step]:
         # pose_image_pil = Image.fromarray(cv2.cvtColor(pose_image_np, cv2.COLOR_BGR2RGB))
         # pose_tensor_list.append(pose_transform(pose_image_pil))
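This hunk lowers the hard ceiling on processed pose frames from 150 to 90, presumably to shorten worst-case generation time on the demo hardware. A minimal standalone sketch of the pattern, assuming frames is a list of decoded frames; MAX_FRAMES and select_frames are illustrative names, not identifiers from app.py:

MAX_FRAMES = 90  # this commit lowers the ceiling from 150

def select_frames(frames, length=0, fi_step=1, max_frames=MAX_FRAMES):
    # length == 0 (or an over-long request) means "use the whole clip";
    # the explicit cap then bounds the work regardless of clip length.
    args_L = len(frames) if length == 0 or length > len(frames) else length
    args_L = min(args_L, max_frames)
    # fi_step subsamples the selection, matching the
    # pose_images[: args_L : fi_step] slice above.
    return frames[:args_L:fi_step]

# A 300-frame clip with no user limit is clamped to 90 frames, then
# subsampled every 3rd frame, so 30 frames are actually processed.
assert len(select_frames(list(range(300)), length=0, fi_step=3)) == 30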
@@ -253,7 +253,7 @@ def video2video(ref_img, source_video, size=512, steps=25, length=60, seed=42):
     verts_list = []
     bs_list = []
     args_L = len(source_images) if length==0 or length*step > len(source_images) else length*step
-    args_L = min(args_L, 150*step)
+    args_L = min(args_L, 90*step)
     for src_image_pil in source_images[: args_L : step*fi_step]:
         src_img_np = cv2.cvtColor(np.array(src_image_pil), cv2.COLOR_RGB2BGR)
         frame_height, frame_width, _ = src_img_np.shape
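The video2video clamp is the same guard scaled by the source sampling stride step: 90*step source frames still reduce to at most 90 output frames once the strided slice is applied. An illustrative standalone variant (select_source_frames is not a name from app.py):

def select_source_frames(source_images, length=0, step=2, fi_step=1, max_frames=90):
    # length is the requested output frame count; length*step source
    # frames must be scanned to produce it.
    args_L = len(source_images) if length == 0 or length * step > len(source_images) else length * step
    args_L = min(args_L, max_frames * step)  # cap scales with the stride
    # the strided slice brings the count back down to <= max_frames outputs
    return source_images[: args_L : step * fi_step]

# 600 source frames at stride 2: clamped to 180, sliced every 2nd -> 90.
assert len(select_source_frames(list(range(600)), length=0, step=2)) == 90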
@@ -376,10 +376,10 @@ with gr.Blocks() as demo:
 
         with gr.Row():
             a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
-            a2v_step_slider = gr.Slider(minimum=5, maximum=30, step=1, value=20, label="Steps (--steps)")
+            a2v_step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
 
         with gr.Row():
-            a2v_length = gr.Slider(minimum=0, maximum=150, step=1, value=60, label="Length (-L) (Set 0 to automatically calculate video length.)")
+            a2v_length = gr.Slider(minimum=0, maximum=90, step=1, value=30, label="Length (-L)")
             a2v_seed = gr.Number(value=42, label="Seed (--seed)")
 
         a2v_botton = gr.Button("Generate", variant="primary")
@@ -404,10 +404,10 @@ with gr.Blocks() as demo:
 
         with gr.Row():
             v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
-            v2v_step_slider = gr.Slider(minimum=5, maximum=30, step=1, value=20, label="Steps (--steps)")
+            v2v_step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
 
         with gr.Row():
-            v2v_length = gr.Slider(minimum=0, maximum=150, step=1, value=60, label="Length (-L) (Set 0 to automatically calculate video length.)")
+            v2v_length = gr.Slider(minimum=0, maximum=90, step=1, value=30, label="Length (-L)")
             v2v_seed = gr.Number(value=42, label="Seed (--seed)")
 
         v2v_botton = gr.Button("Generate", variant="primary")
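The UI hunks mirror the backend clamp: the steps sliders now top out at 20 (default 15) and the length sliders at 90 (default 30), so the interface can no longer request more frames than min(args_L, 90) will keep. A hedged sketch of how such widgets typically feed the generator; the actual .click() wiring sits outside this diff, and generate_stub is an illustrative placeholder:

import gradio as gr

def generate_stub(size, steps, length, seed):
    # stands in for audio2video/video2video; just echoes its inputs
    return f"size={size}, steps={steps}, length={length}, seed={seed}"

with gr.Blocks() as demo:
    with gr.Row():
        size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
        step_slider = gr.Slider(minimum=5, maximum=20, step=1, value=15, label="Steps (--steps)")
    with gr.Row():
        # maximum=90 keeps UI requests inside the backend's min(args_L, 90) cap
        length_slider = gr.Slider(minimum=0, maximum=90, step=1, value=30, label="Length (-L)")
        seed_number = gr.Number(value=42, label="Seed (--seed)")
    button = gr.Button("Generate", variant="primary")
    out = gr.Textbox(label="Result")
    button.click(fn=generate_stub, inputs=[size_slider, step_slider, length_slider, seed_number], outputs=out)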