ercanburak committed
Commit 8c31b90 · 1 Parent(s): d836796

refactor code to use configs better

Files changed (2)
  1. app.py +42 -28
  2. utils.py +22 -28
app.py CHANGED
@@ -1,55 +1,69 @@
+import os
+
 import streamlit as st
 
-from utils import get_dataset_config_names, get_sequence_names, pretrained_models_dict, full_ref_metrics, no_ref_metrics
+from utils import get_configs, get_display_names
 
 # st.header("EVREAL - Event-based Video Reconstruction Evaluation and Analysis Library")
 #
 # paper_link = "https://arxiv.org/abs/2305.00434"
 # code_link = "https://github.com/ercanburak/EVREAL"
 # page_link = "https://ercanburak.github.io/evreal.html"
+# instructions_video = "https://www.youtube.com/watch?v="
 #
 # st.markdown("Paper: " + paper_link, unsafe_allow_html=True)
 # st.markdown("Code: " + paper_link, unsafe_allow_html=True)
 # st.markdown("Page: " + paper_link, unsafe_allow_html=True)
+# st.markdown("Please see this video for instructions on how to use this tool: " + instructions_video, unsafe_allow_html=True)
 
 st.title("Result Analysis Tool")
 
-datasets = get_dataset_config_names("data")
-dataset_sequences_dict = {}
-for dataset in datasets:
-    dataset_sequences_dict[dataset] = get_sequence_names("data", dataset)
-datasets = list(dataset_sequences_dict.keys())
-
-pretrained_models = list(pretrained_models_dict.keys())
-
-selected_pretrained_model_names = st.multiselect(
-    'Select multiple methods to compare',
-    pretrained_models)
-
-col1, col2, col3 = st.columns(3)
-
-# A selectbox on the sidebar, for selecting dataset:
+dataset_cfg_path = os.path.join("cfg", "dataset")
+model_cfg_path = os.path.join("cfg", "model")
+metric_cfg_path = os.path.join("cfg", "metric")
+viz_cfg_path = os.path.join("cfg", "viz")
+
+datasets = get_configs(dataset_cfg_path)
+models = get_configs(model_cfg_path)
+metrics = get_configs(metric_cfg_path)
+vizs = get_configs(viz_cfg_path)
+
+dataset_display_names = get_display_names(datasets)
+model_display_names = get_display_names(models)
+metric_display_names = get_display_names(metrics)
+viz_display_names = get_display_names(vizs)
+
+assert len(set(dataset_display_names)) == len(dataset_display_names), "Dataset display names are not unique"
+assert len(set(model_display_names)) == len(model_display_names), "Model display names are not unique"
+assert len(set(metric_display_names)) == len(metric_display_names), "Metric display names are not unique"
+assert len(set(viz_display_names)) == len(viz_display_names), "Viz display names are not unique"
+
+selected_model_names = st.multiselect('Select multiple methods to compare', model_display_names)
+selected_models = [model for model in models if model['display_name'] in selected_model_names]
+
+col1, col2 = st.columns(2)
+
 with col1:
-    selected_dataset = st.selectbox(
-        'Select dataset',
-        options=datasets
-    )
+    selected_dataset_name = st.selectbox('Select dataset', options=dataset_display_names)
+    selected_dataset = [dataset for dataset in datasets if dataset['display_name'] == selected_dataset_name][0]
 
-# A selectbox on the sidebar, for selecting sequence:
 with col2:
-    selected_sequence = st.selectbox(
-        'Select sequence',
-        options=dataset_sequences_dict[selected_dataset]
-    )
-
-metric_names = no_ref_metrics if selected_dataset in ["ECD_FAST", "MVSEC_NIGHT", "HDR"] else full_ref_metrics
-metric_names = [name.upper() for name in metric_names]
+    selected_sequence = st.selectbox('Select sequence', options=selected_dataset["sequences"].keys())
 
-selected_metric_names = st.multiselect('Select metrics to display', metric_names)
+usable_metrics = [metric for metric in metrics if metric['no_ref'] == selected_dataset['no_ref']]
+usable_metric_display_names = get_display_names(usable_metrics)
 
-other_visualizations = ["ground truth frames", "event_rates", "event_images", "histograms"]
+selected_metric_names = st.multiselect('Select metrics to display', usable_metric_display_names)
+selected_metrics = [metric for metric in usable_metrics if metric['display_name'] in selected_metric_names]
 
-selected_viz = st.multiselect('Select other visualizations to display', other_visualizations)
+selected_viz = st.multiselect('Select other visualizations to display', viz_display_names)
+selected_visualizations = [viz for viz in vizs if viz['display_name'] in selected_viz]
 
 if not st.button('Get Results'):
     st.stop()
+
+
+# num_gt_column = 1 if len(x.intersection(y)) > 0 else 0
+#
+# num_columns = len(selected_pretrained_model_names) + 1
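The refactored app.py assumes each JSON file under cfg/ provides at least a display_name key; dataset configs additionally need a sequences mapping and a no_ref flag, which is matched against each metric config's no_ref flag to decide which metrics apply. As a minimal sketch (the file name, sequence names, and values below are hypothetical, inferred only from how app.py reads the configs), a dataset config such as cfg/dataset/ECD.json might look like:

{
    "display_name": "ECD",
    "no_ref": false,
    "sequences": {
        "calibration": {},
        "boxes_6dof": {}
    }
}

Note that get_configs adds the name key from the file stem at load time, so it does not need to appear in the file itself.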
utils.py CHANGED
@@ -5,40 +5,34 @@ from pathlib import Path
 from collections import OrderedDict
 
 
-pretrained_models_dict = {'E2VID': "0000_000012", 'FireNet': "0000_000020",
-                          'E2VID+': "0000_000000", 'FireNet+': "0000_000002",
-                          'ET-Net': "0000_000030", 'SSL_E2VID': "0000_000040",
-                          'SPADE_E2VID': "0000_000050"}
-
-full_ref_metrics = ['mse', 'ssim', 'lpips']
-no_ref_metrics = ['brisque', 'niqe', 'maniqa']
-
-
 def read_json(fname):
     fname = Path(fname)
     with fname.open('rt') as handle:
         return json.load(handle, object_hook=OrderedDict)
 
 
-def get_filenames(base_path, filename_pattern):
-    file_names = []
+def get_configs(base_path, filename_pattern="*.json"):
+    configs = []
     glob_pattern = os.path.join(base_path, filename_pattern)
-    file_paths = glob.glob(glob_pattern)
+    file_paths = sorted(glob.glob(glob_pattern))
     for file_path in file_paths:
         file_name = Path(file_path).stem
-        file_names.append(file_name)
-    return file_names
-
-
-def get_dataset_config_names(data_configs_path):
-    data_configs_filename_pattern = "*.json"
-    return get_filenames(data_configs_path, data_configs_filename_pattern)
-
-
-def get_sequence_names(data_configs_path, dataset_name):
-    sequences = []
-    dataset_file_name = os.path.join(data_configs_path, dataset_name + ".json")
-    dataset_config = read_json(dataset_file_name)
-    for sequence in dataset_config['sequences']:
-        sequences.append(sequence)
-    return sequences
+        config = read_json(file_path)
+        config['name'] = file_name
+        configs.append(config)
+    return configs
+
+
+def get_display_names(configs):
+    display_names = []
+    for config in configs:
+        display_names.append(config['display_name'])
+    return display_names
+
+# def get_sequence_names(data_configs_path, dataset_name):
+#     sequences = []
+#     dataset_file_name = os.path.join(data_configs_path, dataset_name + ".json")
+#     dataset_config = read_json(dataset_file_name)
+#     for sequence in dataset_config['sequences']:
+#         sequences.append(sequence)
+#     return sequences
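For a quick sanity check of the new helpers, here is a minimal usage sketch (the cfg/dataset path and the JSON contents are assumptions matching the example above, not files from this repository):

import json
import os

from utils import get_configs, get_display_names

# Hypothetical fixture: one dataset config with a display_name key.
os.makedirs(os.path.join("cfg", "dataset"), exist_ok=True)
with open(os.path.join("cfg", "dataset", "ECD.json"), "w") as f:
    json.dump({"display_name": "ECD", "no_ref": False, "sequences": {}}, f)

datasets = get_configs(os.path.join("cfg", "dataset"))
print(datasets[0]["name"])          # "ECD" (file stem, added by get_configs)
print(get_display_names(datasets))  # ["ECD"]

Since get_configs sorts the glob results, the order of the returned configs (and hence of the names shown in the Streamlit widgets) is stable across runs.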