Spaces:
Running
Running
Commit
·
0d0ab89
1
Parent(s):
292cdf0
logic will be fixed
Browse files
app.py
CHANGED
@@ -2,7 +2,7 @@ import os
|
|
2 |
|
3 |
import streamlit as st
|
4 |
|
5 |
-
from utils import get_configs, get_display_names
|
6 |
|
7 |
# st.header("EVREAL - Event-based Video Reconstruction Evaluation and Analysis Library")
|
8 |
#
|
@@ -18,6 +18,8 @@ from utils import get_configs, get_display_names
|
|
18 |
|
19 |
st.title("Result Analysis Tool")
|
20 |
|
|
|
|
|
21 |
dataset_cfg_path = os.path.join("cfg", "dataset")
|
22 |
model_cfg_path = os.path.join("cfg", "model")
|
23 |
metric_cfg_path = os.path.join("cfg", "metric")
|
@@ -26,12 +28,12 @@ viz_cfg_path = os.path.join("cfg", "viz")
|
|
26 |
datasets = get_configs(dataset_cfg_path)
|
27 |
models = get_configs(model_cfg_path)
|
28 |
metrics = get_configs(metric_cfg_path)
|
29 |
-
|
30 |
|
31 |
dataset_display_names = get_display_names(datasets)
|
32 |
model_display_names = get_display_names(models)
|
33 |
metric_display_names = get_display_names(metrics)
|
34 |
-
viz_display_names = get_display_names(
|
35 |
|
36 |
assert len(set(dataset_display_names)) == len(dataset_display_names), "Dataset display names are not unique"
|
37 |
assert len(set(model_display_names)) == len(model_display_names), "Model display names are not unique"
|
@@ -57,13 +59,46 @@ selected_metric_names = st.multiselect('Select metrics to display', usable_metri
|
|
57 |
selected_metrics = [metric for metric in usable_metrics if metric['display_name'] in selected_metric_names]
|
58 |
|
59 |
selected_viz = st.multiselect('Select other visualizations to display', viz_display_names)
|
60 |
-
selected_visualizations = [viz for viz in
|
61 |
|
62 |
if not st.button('Get Results'):
|
63 |
st.stop()
|
64 |
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
import streamlit as st
|
4 |
|
5 |
+
from utils import get_configs, get_display_names, get_path_for_viz
|
6 |
|
7 |
# st.header("EVREAL - Event-based Video Reconstruction Evaluation and Analysis Library")
|
8 |
#
|
|
|
18 |
|
19 |
st.title("Result Analysis Tool")
|
20 |
|
21 |
+
data_base_path = "/home/bercan/ebv/evreal_data"
|
22 |
+
|
23 |
dataset_cfg_path = os.path.join("cfg", "dataset")
|
24 |
model_cfg_path = os.path.join("cfg", "model")
|
25 |
metric_cfg_path = os.path.join("cfg", "metric")
|
|
|
28 |
datasets = get_configs(dataset_cfg_path)
|
29 |
models = get_configs(model_cfg_path)
|
30 |
metrics = get_configs(metric_cfg_path)
|
31 |
+
visualizations = get_configs(viz_cfg_path)
|
32 |
|
33 |
dataset_display_names = get_display_names(datasets)
|
34 |
model_display_names = get_display_names(models)
|
35 |
metric_display_names = get_display_names(metrics)
|
36 |
+
viz_display_names = get_display_names(visualizations)
|
37 |
|
38 |
assert len(set(dataset_display_names)) == len(dataset_display_names), "Dataset display names are not unique"
|
39 |
assert len(set(model_display_names)) == len(model_display_names), "Model display names are not unique"
|
|
|
59 |
selected_metrics = [metric for metric in usable_metrics if metric['display_name'] in selected_metric_names]
|
60 |
|
61 |
selected_viz = st.multiselect('Select other visualizations to display', viz_display_names)
|
62 |
+
selected_visualizations = [viz for viz in visualizations if viz['display_name'] in selected_viz]
|
63 |
|
64 |
if not st.button('Get Results'):
|
65 |
st.stop()
|
66 |
|
67 |
+
# Partition the selected visualizations by where they apply: only under the
# ground-truth column, only under the model columns, or under both.
gt_only_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'gt_only']
model_only_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'model_only']
both_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'both']

# The reconstruction video is always shown for every model; the synthetic
# "ground truth" pseudo-model is only added when gt-only viz were selected.
recon_viz = {"name": "recon", "display_name": "Reconstruction", "viz_type": "both"}
ground_truth = {"name": "gt", "display_name": "Ground Truth", "model_id": "groundtruth"}

# Per-model rows, top to bottom: reconstruction, shared viz, metrics, model-only viz.
model_viz = [recon_viz] + both_viz + selected_metrics + model_only_viz

num_columns = len(selected_models) + 1  # +1 for the row-label column
if len(gt_only_viz) > 0:
    num_columns += 1  # extra column dedicated to gt-only visualizations
    selected_models.append(ground_truth)

# NOTE(review): assumes this row-count formula matches the intended layout —
# it is a max over lists of different lengths, so individual cells are
# bounds-checked below instead of assuming every list fills every row.
num_rows = max(len(selected_metrics) + 1, len(gt_only_viz)) + 1 + len(both_viz)

for row_idx in range(num_rows):
    for col_idx in range(num_columns):
        if row_idx == 0 and col_idx == 0:
            # Top-left corner cell.
            print("meta")
        elif row_idx == 0:
            # Header row: model display names.
            print(selected_models[col_idx - 1]['display_name'])
        elif col_idx == 0:
            # First column: row labels (viz / metric names).
            # Bounds check: num_rows may exceed len(model_viz) + 1.
            if row_idx - 1 < len(model_viz):
                print(model_viz[row_idx - 1]['display_name'])
        elif len(gt_only_viz) > 0 and col_idx == num_columns - 1:
            # Last column: gt-only visualizations. The len() guard fixes two
            # bugs in the previous logic: without it this branch (a) fired for
            # the last *model* column when no gt-only viz were selected, and
            # (b) indexed past the end of gt_only_viz on deeper rows.
            if row_idx - 1 < len(gt_only_viz):
                print(gt_only_viz[row_idx - 1]['display_name'])
        else:
            # Model cell: resolve and show the video for this model/viz pair.
            if row_idx - 1 < len(model_viz):
                video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence,
                                              selected_models[col_idx - 1], model_viz[row_idx - 1])
                print(video_path)
|
utils.py
CHANGED
@@ -29,10 +29,10 @@ def get_display_names(configs):
|
|
29 |
display_names.append(config['display_name'])
|
30 |
return display_names
|
31 |
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
|
|
29 |
display_names.append(config['display_name'])
|
30 |
return display_names
|
31 |
|
32 |
+
|
33 |
+
def get_path_for_viz(base_path, dataset, sequence, model, viz):
    """Return the path of one visualization video.

    Layout: <base_path>/<model_id>/<dataset name>_<sequence>/videos/<viz name>.mp4
    """
    sequence_dir = f"{dataset['name']}_{sequence}"
    return os.path.join(base_path, model["model_id"], sequence_dir, "videos", f"{viz['name']}.mp4")
|
37 |
+
|
38 |
+
|