import os

import streamlit as st

from utils import get_configs, get_display_names
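# The two helpers imported from utils are not shown in this file. For
# reference, a minimal sketch of what they are assumed to do (hypothetical;
# the actual implementations live in utils.py):
#
#   import json
#
#   def get_configs(cfg_dir):
#       """Load every JSON file in cfg_dir into a list of config dicts."""
#       configs = []
#       for name in sorted(os.listdir(cfg_dir)):
#           if name.endswith(".json"):
#               with open(os.path.join(cfg_dir, name)) as f:
#                   configs.append(json.load(f))
#       return configs
#
#   def get_display_names(configs):
#       """Pull the 'display_name' field out of each config dict."""
#       return [cfg["display_name"] for cfg in configs]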

# st.header("EVREAL  - Event-based Video Reconstruction Evaluation and Analysis Library")
#
# paper_link = "https://arxiv.org/abs/2305.00434"
# code_link = "https://github.com/ercanburak/EVREAL"
# page_link = "https://ercanburak.github.io/evreal.html"
# instructions_video = "https://www.youtube.com/watch?v="
#
# st.markdown("Paper: " + paper_link, unsafe_allow_html=True)
# st.markdown("Code: " + paper_link, unsafe_allow_html=True)
# st.markdown("Page: " + paper_link, unsafe_allow_html=True)
# st.markdown("Please see this video for instructions on how to use this tool: " + instructions_video, unsafe_allow_html=True)

st.title("Result Analysis Tool")

# Each category of selectable item (datasets, models, metrics, visualizations)
# has its own config directory.
dataset_cfg_path = os.path.join("cfg", "dataset")
model_cfg_path = os.path.join("cfg", "model")
metric_cfg_path = os.path.join("cfg", "metric")
viz_cfg_path = os.path.join("cfg", "viz")
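# The code below assumes each config is a dict with at least a 'display_name'
# key; dataset and metric configs additionally carry 'no_ref', and dataset
# configs carry 'sequences'. A hypothetical dataset config might look like:
#
#   {
#       "display_name": "ECD",
#       "no_ref": false,
#       "sequences": {"boxes_6dof": {}, "calibration": {}}
#   }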

datasets = get_configs(dataset_cfg_path)
models = get_configs(model_cfg_path)
metrics = get_configs(metric_cfg_path)
vizs = get_configs(viz_cfg_path)

dataset_display_names = get_display_names(datasets)
model_display_names = get_display_names(models)
metric_display_names = get_display_names(metrics)
viz_display_names = get_display_names(vizs)

# Display names are used as widget option labels, so they must be unique
# within each category.
assert len(set(dataset_display_names)) == len(dataset_display_names), "Dataset display names are not unique"
assert len(set(model_display_names)) == len(model_display_names), "Model display names are not unique"
assert len(set(metric_display_names)) == len(metric_display_names), "Metric display names are not unique"
assert len(set(viz_display_names)) == len(viz_display_names), "Viz display names are not unique"

selected_model_names = st.multiselect('Select multiple methods to compare', model_display_names)
selected_models = [model for model in models if model['display_name'] in selected_model_names]

# Lay out the dataset and sequence selectors side by side.
col1, col2 = st.columns(2)

with col1:
    selected_dataset_name = st.selectbox('Select dataset', options=dataset_display_names)
    selected_dataset = [dataset for dataset in datasets if dataset['display_name'] == selected_dataset_name][0]

with col2:
    selected_sequence = st.selectbox('Select sequence', options=selected_dataset["sequences"].keys())

# Only metrics whose no_ref flag matches the dataset's are offered, so that
# full-reference metrics are never paired with a dataset lacking ground truth.
usable_metrics = [metric for metric in metrics if metric['no_ref'] == selected_dataset['no_ref']]
usable_metric_display_names = get_display_names(usable_metrics)

selected_metric_names = st.multiselect('Select metrics to display', usable_metric_display_names)
selected_metrics = [metric for metric in usable_metrics if metric['display_name'] in selected_metric_names]

selected_viz = st.multiselect('Select other visualizations to display', viz_display_names)
selected_visualizations = [viz for viz in vizs if viz['display_name'] in selected_viz]

# Halt the script here until the user explicitly requests results.
if not st.button('Get Results'):
    st.stop()


# num_gt_column = 1 if len(x.intersection(y)) > 0 else 0
#
# num_columns = len(selected_pretrained_model_names) + 1
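# The app is launched with Streamlit's standard CLI; the filename below is
# whatever this script is saved as (shown here hypothetically):
#
#   streamlit run result_analysis.py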