Tristan Thrush committed on
Commit
d7af059
β€’
1 Parent(s): 32dc08d

added url redirect for dataset selection

Browse files
Files changed (2) hide show
  1. app.py +14 -3
  2. requirements.txt +1 -0
app.py CHANGED
@@ -38,11 +38,22 @@ st.markdown(
38
  you to evaluate any πŸ€— Transformers model with a dataset on the Hub. Please
39
  select the dataset and configuration below. The results of your evaluation
40
  will be displayed on the public leaderboard
41
- [here](https://huggingface.co/spaces/huggingface/leaderboards).
42
  """
43
  )
44
 
45
- dataset_name = st.selectbox("Select a dataset", [f"lewtun/autoevaluate__{dset}" for dset in DATASETS_TO_EVALUATE])
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  # TODO: remove this step once we select real datasets
48
  # Strip out original dataset name
@@ -110,7 +121,7 @@ with st.form(key="form"):
110
  f"""
111
  Evaluation takes appoximately 1 hour to complete, so grab a β˜• or 🍡 while you wait:
112
 
113
- * πŸ“Š Click [here](https://huggingface.co/spaces/huggingface/leaderboards) to view the results from your submission
114
  """
115
  )
116
  else:
 
38
  you to evaluate any πŸ€— Transformers model with a dataset on the Hub. Please
39
  select the dataset and configuration below. The results of your evaluation
40
  will be displayed on the public leaderboard
41
+ [here](https://huggingface.co/spaces/autoevaluate/leaderboards).
42
  """
43
  )
44
 
45
+ selectable_datasets = [f"lewtun/autoevaluate__{dset}" for dset in DATASETS_TO_EVALUATE]
46
+
47
+ query_params = st.experimental_get_query_params()
48
+ default_dataset = selectable_datasets[0]
49
+ if "dataset" in query_params:
50
+ if len(query_params["dataset"]) > 0 and query_params["dataset"][0] in selectable_datasets:
51
+ default_dataset = query_params["dataset"][0]
52
+
53
+ dataset_name = st.selectbox(
54
+ "Select a dataset",
55
+ selectable_datasets,
56
+ index=selectable_datasets.index(default_dataset))
57
 
58
  # TODO: remove this step once we select real datasets
59
  # Strip out original dataset name
 
121
  f"""
122
  Evaluation takes appoximately 1 hour to complete, so grab a β˜• or 🍡 while you wait:
123
 
124
+ * πŸ“Š Click [here](https://huggingface.co/spaces/autoevaluate/leaderboards) to view the results from your submission
125
  """
126
  )
127
  else:
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
  huggingface-hub==0.4.0
2
  python-dotenv
 
 
1
  huggingface-hub==0.4.0
2
  python-dotenv
3
+ streamlit