ercanburak committed on
Commit
95312c3
1 Parent(s): c103e03

first version of ffmpeg command

Files changed (2)
  1. app.py +52 -22
  2. utils.py +35 -0
app.py CHANGED
@@ -1,8 +1,9 @@
 import os
+ import subprocess

 import streamlit as st

- from utils import get_configs, get_display_names, get_path_for_viz
+ from utils import get_configs, get_display_names, get_path_for_viz, Layout

 # st.header("EVREAL - Event-based Video Reconstruction Evaluation and Analysis Library")
 #
@@ -20,6 +21,8 @@ st.title("Result Analysis Tool")

 data_base_path = "/home/bercan/ebv/evreal_data"

+ font_path = "font/Ubuntu-B.ttf"
+
 dataset_cfg_path = os.path.join("cfg", "dataset")
 model_cfg_path = os.path.join("cfg", "model")
 metric_cfg_path = os.path.join("cfg", "metric")
@@ -75,10 +78,10 @@ model_only_viz = [viz for viz in selected_visualizations if viz['viz_type'] == '
 both_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'both']

 recon_viz = {"name": "recon", "display_name": "Reconstruction", "viz_type": "both", "gt_type": "frame"}
- ground_truth = {"name": "gt", "display_name": "Ground Truth", "model_id": "groundtruth"}
+ # ground_truth = {"name": "gt", "display_name": "Ground Truth", "model_id": "groundtruth"}

 model_viz = [recon_viz] + both_viz + selected_metrics + model_only_viz
- num_model_rows = len(model_viz) + 1
+ num_model_rows = len(model_viz)

 gt_viz = []
 if selected_dataset['has_frames']:
@@ -87,28 +90,55 @@ if selected_dataset['has_frames']:
     gt_viz.extend([viz for viz in gt_only_viz if viz['gt_type'] == 'frame'])

 gt_viz.extend([viz for viz in both_viz if viz['gt_type'] == 'event'])
- gt_viz.extend([viz for viz in gt_viz if viz['gt_type'] == 'event'])
+ gt_viz.extend([viz for viz in gt_only_viz if viz['gt_type'] == 'event'])
+
+ # print(get_display_names(model_viz))
+ # print(get_display_names(gt_viz))
+ # st.stop()

- num_gt_rows = len(gt_viz) + 1
+ num_gt_rows = len(gt_viz)
 num_rows = max(num_model_rows, num_gt_rows)

- num_model_columns = len(selected_models) + 1
+ num_model_columns = len(selected_models)
+
+ num_elements = num_rows * num_model_columns

+ layout = Layout(num_rows, num_model_columns)
+ layout_str = layout.get_layout_str()
+
+ video_paths = []
 for row_idx in range(num_rows):
-     row_visualizations = []
     for col_idx in range(num_model_columns):
-         if row_idx == 0 and col_idx == 0:
-             print("meta")
-             pass
-         elif row_idx == 0:
-             # model names
-             print(selected_models[col_idx - 1]['display_name'])
-             pass
-         elif col_idx == 0:
-             # metric names
-             print(model_viz[row_idx - 1]['display_name'])
-             pass
-         else:
-             video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence, selected_models[col_idx - 1], model_viz[row_idx - 1])
-             print(video_path)
-             pass
+         video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence,
+                                        selected_models[col_idx], model_viz[row_idx])
+         print(video_path)
+         video_paths.append(video_path)
+         # if os.path.isfile(video_path):
+         #     video_paths.append(video_path)
+         # else:
+         #     print("Video path does not exist: " + video_path)
+ #
+ # assert len(video_paths) == num_elements, "Number of video paths is not equal to expected number of elements"
+
+ inputs_str = " ".join(["-i " + video_path for video_path in video_paths])
+
+ crop_str = "crop=trunc(iw/2)*2:trunc(ih/2)*2"
+
+ w = selected_dataset["width"]
+ input_scaling_parts = []
+ xstack_input_parts = []
+ for i in range(num_elements):
+     input_scaling_part = "[{}:v]scale={}:-1,{}[v{}]".format(i, w, crop_str, i)
+     input_scaling_parts.append(input_scaling_part)
+     xstack_input_part = "[v{}]".format(i)
+     xstack_input_parts.append(xstack_input_part)
+ input_scaling_str = ";".join(input_scaling_parts)
+ xstack_input_str = "".join(xstack_input_parts)
+
+
+
+ # opt = "-c:v libx264 -preset veryslow -crf 18 -c:a copy"
+ opt = ""
+ ffmpeg_command_str = "ffmpeg -y " + inputs_str + " -filter_complex \"" + input_scaling_str + ";" + xstack_input_str + "xstack=inputs=" + str(num_elements) + ":layout=" + layout_str + "\"" + opt + " output.mp4"
+ print(ffmpeg_command_str)
+ subprocess.call(ffmpeg_command_str, shell=True)
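
For reference, the string built above expands to roughly the following for a hypothetical 2x2 grid (two selected models, two visualization rows). The file names in0.mp4-in3.mp4 and the 240-pixel target width are placeholders, not paths produced by this commit; the layout string is the one Layout(2, 2) would emit:

ffmpeg -y -i in0.mp4 -i in1.mp4 -i in2.mp4 -i in3.mp4 -filter_complex "[0:v]scale=240:-1,crop=trunc(iw/2)*2:trunc(ih/2)*2[v0];[1:v]scale=240:-1,crop=trunc(iw/2)*2:trunc(ih/2)*2[v1];[2:v]scale=240:-1,crop=trunc(iw/2)*2:trunc(ih/2)*2[v2];[3:v]scale=240:-1,crop=trunc(iw/2)*2:trunc(ih/2)*2[v3];[v0][v1][v2][v3]xstack=inputs=4:layout=0_0|w0_0|0_h0|w2_h0" output.mp4

Each input is scaled to the dataset width, cropped to even dimensions (as most H.264 encoders require), labeled [v0]..[v3], and tiled into a single output with ffmpeg's xstack filter.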
utils.py CHANGED
@@ -36,3 +36,38 @@ def get_path_for_viz(base_path, dataset, sequence, model, viz):
     return video_path


+ def get_text_video(video_h, video_w, text, font, fontsize, fontcolor="black"):
+     pass
+
+
+ class Layout:
+     def __init__(self, num_rows, num_cols):
+         self.num_rows = num_rows
+         self.num_cols = num_cols
+
+     def get_layout_str(self):
+         layout_parts = []
+         layout_y_parts = []
+         for row_idx in range(self.num_rows):
+             layout_x_parts = []
+             if row_idx == 0:
+                 layout_y_parts.append("0")
+             else:
+                 upper_vid_no = (row_idx - 1) * self.num_cols
+                 if row_idx > 1:
+                     layout_y_parts.append(layout_y_parts[-1] + "+h" + str(upper_vid_no))
+                 else:
+                     layout_y_parts.append("h" + str(upper_vid_no))
+             for col_idx in range(self.num_cols):
+                 if col_idx == 0:
+                     layout_x_parts.append("0")
+                 else:
+                     left_vid_no = row_idx * self.num_cols + col_idx - 1
+                     if col_idx > 1:
+                         layout_x_parts.append(layout_x_parts[-1] + "+w" + str(left_vid_no))
+                     else:
+                         layout_x_parts.append("w" + str(left_vid_no))
+                 layout_part = "_".join([layout_x_parts[-1], layout_y_parts[-1]])
+                 layout_parts.append(layout_part)
+         layout_string = "|".join(layout_parts)
+         return layout_string
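
As a quick illustration of the layout strings this class produces (a sketch, not code from the commit; the 2x3 grid size is an arbitrary example):

from utils import Layout

# 2 rows x 3 columns of tiles for ffmpeg's xstack filter
layout = Layout(2, 3)
print(layout.get_layout_str())
# prints: 0_0|w0_0|w0+w1_0|0_h0|w3_h0|w3+w4_h0

Each entry is the x_y offset of one tile, expressed in terms of the widths (wN) and heights (hN) of the other inputs, which is the coordinate syntax xstack expects.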