ercanburak committed
Commit • 523c064 • Parent(s): e3cd871
second working version
app.py
CHANGED
@@ -3,7 +3,7 @@ import subprocess
 
 import streamlit as st
 
-from utils import get_configs, get_display_names, get_path_for_viz,
+from utils import get_configs, get_display_names, get_path_for_viz, get_video_height
 
 # st.header("EVREAL - Event-based Video Reconstruction Evaluation and Analysis Library")
 #
@@ -92,81 +92,67 @@ if selected_dataset['has_frames']:
 gt_viz.extend([viz for viz in both_viz if viz['gt_type'] == 'event'])
 gt_viz.extend([viz for viz in gt_only_viz if viz['gt_type'] == 'event'])
 
-# print(get_display_names(model_viz))
-# print(get_display_names(gt_viz))
-# st.stop()
-
 num_gt_rows = len(gt_viz)
 num_rows = max(num_model_rows, num_gt_rows)
 
 if len(gt_viz) > 0:
     selected_models.append(ground_truth)
 
+padding = 2
 num_cols = len(selected_models)
-
+crop_str = "crop=trunc(iw/2)*2:trunc(ih/2)*2"
+pad_str = "pad=ceil(iw/2)*2+{}:ceil(ih/2)*2+{}:{}:{}".format(padding*2, padding*2, padding, padding)
 num_elements = num_rows * num_cols
 
+w = selected_dataset["width"]
+input_filter_parts = []
+xstack_input_parts = []
+layout_parts = []
 video_paths = []
-row_heights = [
+row_heights = [""]*num_rows
 for row_idx in range(num_rows):
     for col_idx in range(num_cols):
         cur_model = selected_models[col_idx]
         if cur_model['name'] == "gt":
-            if row_idx >= len(gt_viz):
-                video_path = ""
-            else:
+            if row_idx < len(gt_viz):
                 video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence, cur_model, gt_viz[row_idx])
                 # if not os.path.isfile(video_path):
                 #     raise ValueError("Video path does not exist: " + video_path)
-        else:
-            if row_idx >= len(model_viz):
-                video_path = ""
             else:
+                continue
+        else:
+            if row_idx < len(model_viz):
                 video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence, cur_model, model_viz[row_idx])
                 # if not os.path.isfile(video_path):
                 #     raise ValueError("Video path does not exist: " + video_path)
-
-
-
-
-
+            else:
+                continue
+
+        vid_idx = len(video_paths)
+        if row_heights[row_idx] == "":
+            row_heights[row_idx] = "h{}".format(vid_idx)
+        input_filter_part = "[{}:v]scale={}:-1,{}[v{}]".format(vid_idx, w, pad_str, vid_idx)
+        input_filter_parts.append(input_filter_part)
+        xstack_input_part = "[v{}]".format(vid_idx)
+        xstack_input_parts.append(xstack_input_part)
         video_paths.append(video_path)
+        layout_w = "+".join(["w{}".format(i) for i in range(col_idx)]) if col_idx > 0 else "0"
+        layout_h = "+".join(row_heights[:row_idx]) if row_idx > 0 else "0"
+        layout_part = layout_w + "_" + layout_h
+        layout_parts.append(layout_part)
 
-inputs_str = " ".join(["-i " + video_path for video_path in video_paths
-num_inputs = len(
-
-crop_str = "crop=trunc(iw/2)*2:trunc(ih/2)*2"
-pad_str = "pad=ceil(iw/2)*2+4:ceil(ih/2)*2+4:2:2"
-
-# empty_elem_str = "drawbox=t=fill:c=black,scale={}:{}[v{}]"
-
-w = selected_dataset["width"]
-input_filter_parts = []
-xstack_input_parts = []
-input_vid_no = 0
-for i in range(num_elements):
-    row_idx = i // num_cols
-    xstack_input_part = "[v{}]".format(i)
-    xstack_input_parts.append(xstack_input_part)
-    if video_paths[i]:
-        input_filter_part = "[{}:v]".format(input_vid_no)
-        input_vid_no += 1
-    else:
-        input_filter_part = "color=c=black,"
-        input_filter_part = "[{}]drawbox=:w={}:h={}:color=black:t=fill,".format(num_inputs, w, row_heights[row_idx])
-        pass
-    input_filter_part = input_filter_part + "scale={}:-1,{}[v{}]".format(w, pad_str, i)
-    input_filter_parts.append(input_filter_part)
+inputs_str = " ".join(["-i " + video_path for video_path in video_paths])
+num_inputs = len(video_paths)
 
 input_scaling_str = ";".join(input_filter_parts)
 xstack_input_str = "".join(xstack_input_parts)
-
-layout = Layout(num_rows, num_cols)
-layout_str = layout.get_layout_str()
+layout_str = "|".join(layout_parts)
 
 # opt = "-c:v libx264 -preset veryslow -crf 18 -c:a copy"
 opt = ""
-
+opt_fill = ":fill=black"
+# opt_fill = ""
+ffmpeg_command_str = "ffmpeg -y " + inputs_str + " -filter_complex \"" + input_scaling_str + ";" + xstack_input_str + "xstack=inputs=" + str(num_inputs) + ":layout=" + layout_str + opt_fill + "\"" + opt + " output.mp4"
 print(ffmpeg_command_str)
 ret = subprocess.call(ffmpeg_command_str, shell=True)
 
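For orientation, the filter graph that the new app.py code assembles can be reproduced in isolation. The sketch below is not part of the commit: the 2x3 grid, the 640-pixel target width, and the input file names are made up, and every grid cell is assumed to be filled (the commit skips missing cells with continue). It mirrors the per-input scale/pad chain and the xstack layout bookkeeping, then prints the resulting ffmpeg command:

# Standalone sketch (not from the commit): xstack layout bookkeeping for a
# hypothetical, fully populated 2x3 grid of equally sized inputs.
num_rows, num_cols = 2, 3
w = 640                          # stand-in for selected_dataset["width"]
padding = 2
pad_str = "pad=ceil(iw/2)*2+{}:ceil(ih/2)*2+{}:{}:{}".format(padding * 2, padding * 2, padding, padding)

video_paths = ["tile_{}_{}.mp4".format(r, c) for r in range(num_rows) for c in range(num_cols)]

input_filter_parts, xstack_input_parts, layout_parts = [], [], []
row_heights = [""] * num_rows
for row_idx in range(num_rows):
    for col_idx in range(num_cols):
        vid_idx = row_idx * num_cols + col_idx
        if row_heights[row_idx] == "":
            # the first tile of each row lends its (padded) height to the whole row
            row_heights[row_idx] = "h{}".format(vid_idx)
        # scale every input to the same width, then pad a small black border around it
        input_filter_parts.append("[{}:v]scale={}:-1,{}[v{}]".format(vid_idx, w, pad_str, vid_idx))
        xstack_input_parts.append("[v{}]".format(vid_idx))
        # x offset: widths of the tiles to the left; y offset: heights of the rows above
        layout_w = "+".join(["w{}".format(i) for i in range(col_idx)]) if col_idx > 0 else "0"
        layout_h = "+".join(row_heights[:row_idx]) if row_idx > 0 else "0"
        layout_parts.append(layout_w + "_" + layout_h)

inputs_str = " ".join("-i " + p for p in video_paths)
filter_str = "{};{}xstack=inputs={}:layout={}:fill=black".format(
    ";".join(input_filter_parts), "".join(xstack_input_parts),
    len(video_paths), "|".join(layout_parts))
print('ffmpeg -y {} -filter_complex "{}" output.mp4'.format(inputs_str, filter_str))
# layout for this grid: 0_0|w0_0|w0+w1_0|0_h0|w0_h0|w0+w1_h0

Reusing w0 and w1 as column offsets in the second row only works because every tile is scaled to the same width; likewise, one representative height per row (h0, h3, ...) stands in for the whole row, which is why xstack's fill=black option is enabled to cover any leftover gaps.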
utils.py
CHANGED
@@ -41,39 +41,6 @@ def get_text_video(video_h, video_w, text, font, fontsize, fontcolor="black"):
     pass
 
 
-class Layout:
-    def __init__(self, num_rows, num_cols):
-        self.num_rows = num_rows
-        self.num_cols = num_cols
-
-    def get_layout_str(self):
-        layout_parts = []
-        layout_y_parts = []
-        for row_idx in range(self.num_rows):
-            layout_x_parts = []
-            if row_idx == 0:
-                layout_y_parts.append("0")
-            else:
-                upper_vid_no = (row_idx - 1) * self.num_cols
-                if row_idx > 1:
-                    layout_y_parts.append(layout_y_parts[-1] + "+h" + str(upper_vid_no))
-                else:
-                    layout_y_parts.append("h" + str(upper_vid_no))
-            for col_idx in range(self.num_cols):
-                if col_idx == 0:
-                    layout_x_parts.append("0")
-                else:
-                    left_vid_no = row_idx * self.num_cols + col_idx - 1
-                    if col_idx > 1:
-                        layout_x_parts.append(layout_x_parts[-1] + "+w" + str(left_vid_no))
-                    else:
-                        layout_x_parts.append("w" + str(left_vid_no))
-            layout_part = "_".join([layout_x_parts[-1], layout_y_parts[-1]])
-            layout_parts.append(layout_part)
-        layout_string = "|".join(layout_parts)
-        return layout_string
-
-
 def get_video_height(video_path):
     probe = ffmpeg.probe(video_path)
     video_streams = ffmpeg.probe(video_path, select_streams="v")
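The rest of get_video_height falls outside this hunk, so only its first two lines are visible. As a rough illustration of how such a probe can be written with the ffmpeg-python package (the helper name and logic below are assumptions for illustration, not the actual continuation of utils.py):

import ffmpeg  # the ffmpeg-python bindings already used by utils.py

def probe_video_height(video_path):
    # Hypothetical sketch, not the real body of get_video_height:
    # ffprobe the file, keep only video streams, and read the first stream's height.
    probe = ffmpeg.probe(video_path, select_streams="v")
    streams = probe.get("streams", [])
    if not streams:
        raise ValueError("No video stream found in " + video_path)
    return int(streams[0]["height"])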