ychenhq committed on
Commit
be00882
1 Parent(s): 0b158ed

remove all basedir

Browse files
Files changed (1) hide show
  1. app.py +11 -12
app.py CHANGED
@@ -15,7 +15,6 @@ import math
15
  import requests
16
 
17
 
18
- basedir = "https://huggingface.co/spaces/ychenhq/VideoCrafterXen/tree/main"
19
  vidOut = "results/results"
20
  uvqOut = "results/modified_prompts_eval"
21
  evalOut = "evaluation_results"
@@ -30,7 +29,7 @@ def genScore():
30
  for i in range(1, num_of_vid+1):
31
  fileindex = f"{i:04d}"
32
  os.system(
33
- f'python3 ./uvq/uvq_main.py --input_files="{fileindex},2,{basedir}/{vidOut}/{fileindex}.mp4" --output_dir {uvqOut} --model_dir ./uvq/models'
34
  )
35
 
36
 
@@ -55,7 +54,7 @@ def chooseBestVideo():
55
  '''We loop thru this current processed video'''
56
  filedir = f"{i:04d}"
57
  filename = f"{i:04d}_uvq.csv"
58
- with open(os.path.join(basedir, uvqOut, filedir, filename), 'r') as file:
59
  MOS = file.read().strip()
60
 
61
  MOS_score = getScore(MOS)
@@ -88,10 +87,10 @@ def extract_scores_from_json(json_path):
88
  def VBench_eval(vid_filename):
89
  # vid_filename: video filename without .mp4
90
  os.system(
91
- f'python VBench/evaluate.py --dimension "motion_smoothness" --videos_path {os.path.join(basedir, vidOut, vid_filename)}.mp4 --custom_input --output_filename {vid_filename}'
92
  )
93
  eval_file_path = os.path.join(
94
- basedir, evalOut, f"{vid_filename}_eval_results.json")
95
  motion_score = extract_scores_from_json(eval_file_path)
96
 
97
  return motion_score
@@ -99,11 +98,11 @@ def VBench_eval(vid_filename):
99
 
100
  def interpolation(chosen_idx, fps):
101
  vid_filename = f"{chosen_idx:04d}.mp4"
102
- os.chdir(f"{basedir}/ECCV2022-RIFE")
103
  os.system(
104
- f'python3 inference_video.py --exp=2 --video={os.path.join(basedir, vidOut, vid_filename)} --fps {fps}'
105
  )
106
- os.chdir(f"{basedir}")
107
  out_name = f"{chosen_idx:04d}_4X_{fps}fps.mp4"
108
  return out_name
109
 
@@ -196,19 +195,19 @@ def generate_output(input_text, output_video_1, fps, examples):
196
  output = call_gpt_api(
197
  prompt=f"Generate 2 similar prompts and add some reasonable words to the given prompt and not change the meaning, each within 30 words: {input_text}", isSentence=True)
198
  output.append(input_text)
199
- with open(f"{basedir}/prompts/test_prompts.txt", 'w') as file:
200
  for i, sentence in enumerate(output):
201
  if i < len(output) - 1:
202
  file.write(sentence + '\n')
203
  else:
204
  file.write(sentence)
205
  os.system(
206
- f'sh {os.path.join(basedir, "scripts", "run_text2video.sh")}')
207
  # Connect the video output and return the video corresponding link
208
  genScore()
209
  chosen_idx = chooseBestVideo()
210
  chosen_vid_path = interpolation(chosen_idx, fps)
211
- chosen_vid_path = f"{basedir}/{vidOut}/{chosen_vid_path}"
212
  output_video_1 = gr.Video(
213
  value=chosen_vid_path, show_download_button=True)
214
 
@@ -278,7 +277,7 @@ def t2v_demo(result_dir='./tmp/'):
278
  with gr.Tab(label='Result'):
279
  with gr.Row():
280
  output_video_1 = gr.Video(
281
- value=f"{basedir}/sample/0009.mp4", show_download_button=True)
282
 
283
  video_len.change(update_fps, inputs=[video_len, fps], outputs=fps)
284
  # fps.change(update_video_len_slider, inputs = fps, outputs = video_len)
 
15
  import requests
16
 
17
 
 
18
  vidOut = "results/results"
19
  uvqOut = "results/modified_prompts_eval"
20
  evalOut = "evaluation_results"
 
29
  for i in range(1, num_of_vid+1):
30
  fileindex = f"{i:04d}"
31
  os.system(
32
+ f'python3 ./uvq/uvq_main.py --input_files="{fileindex},2, {vidOut}/{fileindex}.mp4" --output_dir {uvqOut} --model_dir ./uvq/models'
33
  )
34
 
35
 
 
54
  '''We loop thru this current processed video'''
55
  filedir = f"{i:04d}"
56
  filename = f"{i:04d}_uvq.csv"
57
+ with open(os.path.join(uvqOut, filedir, filename), 'r') as file:
58
  MOS = file.read().strip()
59
 
60
  MOS_score = getScore(MOS)
 
87
  def VBench_eval(vid_filename):
88
  # vid_filename: video filename without .mp4
89
  os.system(
90
+ f'python VBench/evaluate.py --dimension "motion_smoothness" --videos_path {os.path.join(vidOut, vid_filename)}.mp4 --custom_input --output_filename {vid_filename}'
91
  )
92
  eval_file_path = os.path.join(
93
+ evalOut, f"{vid_filename}_eval_results.json")
94
  motion_score = extract_scores_from_json(eval_file_path)
95
 
96
  return motion_score
 
98
 
99
  def interpolation(chosen_idx, fps):
100
  vid_filename = f"{chosen_idx:04d}.mp4"
101
+ os.chdir("ECCV2022-RIFE")
102
  os.system(
103
+ f'python3 inference_video.py --exp=2 --video={os.path.join(vidOut, vid_filename)} --fps {fps}'
104
  )
105
+ os.chdir("../")
106
  out_name = f"{chosen_idx:04d}_4X_{fps}fps.mp4"
107
  return out_name
108
 
 
195
  output = call_gpt_api(
196
  prompt=f"Generate 2 similar prompts and add some reasonable words to the given prompt and not change the meaning, each within 30 words: {input_text}", isSentence=True)
197
  output.append(input_text)
198
+ with open("prompts/test_prompts.txt", 'w') as file:
199
  for i, sentence in enumerate(output):
200
  if i < len(output) - 1:
201
  file.write(sentence + '\n')
202
  else:
203
  file.write(sentence)
204
  os.system(
205
+ f'sh {os.path.join("scripts", "run_text2video.sh")}')
206
  # Connect the video output and return the video corresponding link
207
  genScore()
208
  chosen_idx = chooseBestVideo()
209
  chosen_vid_path = interpolation(chosen_idx, fps)
210
+ chosen_vid_path = f"{vidOut}/{chosen_vid_path}"
211
  output_video_1 = gr.Video(
212
  value=chosen_vid_path, show_download_button=True)
213
 
 
277
  with gr.Tab(label='Result'):
278
  with gr.Row():
279
  output_video_1 = gr.Video(
280
+ value="sample/0009.mp4", show_download_button=True)
281
 
282
  video_len.change(update_fps, inputs=[video_len, fps], outputs=fps)
283
  # fps.change(update_video_len_slider, inputs = fps, outputs = video_len)