shahriarshm committed
Commit 2e771a4
1 Parent(s): bea6511

update leaderboard config
Files changed (2)
  1. src/about.py +8 -40
  2. src/leaderboard/read_evals.py +8 -6
src/about.py CHANGED
@@ -12,59 +12,27 @@ class Task:
 # ---------------------------------------------------
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("anli_r1", "acc", "ANLI")
-    task1 = Task("logiqa", "acc_norm", "LogiQA")
-
-NUM_FEWSHOT = 0  # Change with your few shot
-# ---------------------------------------------------
-
+    ParsiNLU_Entailment = Task("ParsiNLU Entailment", "Exact Match", "ParsiNLU Entailment")
+    ParsiNLU_Machine_Translation_Fa_En = Task("ParsiNLU Machine Translation Fa-En", "English Sentence Bleu", "ParsiNLU Machine Translation Fa-En")
+    ParsiNLU_Machine_Translation_En_Fa = Task("ParsiNLU Machine Translation En-Fa", "Persian Sentence Bleu", "ParsiNLU Machine Translation En-Fa")
+    ParsiNLU_Reading_Comprehension = Task("ParsiNLU Reading Comprehension", "Common Tokens", "ParsiNLU Reading Comprehension")
+    Persian_Math = Task("Persian Math", "Math Equivalence", "Persian Math")


 # Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""
+TITLE = """<h1 align="center" id="space-title">ParsBench Leaderboard</h1>"""

 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-Intro text
+This leaderboard is created using the <a href="https://github.com/shahriarshm/parsbench">ParsBench</a> benchmarking framework.
 """

 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
-## How it works
-
-## Reproducibility
-To reproduce our results, here are the commands you can run:
-
 """

 EVALUATION_QUEUE_TEXT = """
-## Some good practices before submitting a model
-
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
-```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
-```
-If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
-
-Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
-
-### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
-It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
-
-### 3) Make sure your model has an open license!
-This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗
-
-### 4) Fill up your model card
-When we add extra information about models to the leaderboard, it will be automatically taken from the model card.
-
-## In case of model failure
-If your model is displayed in the `FAILED` category, its execution stopped.
-Make sure you have followed the above steps first.
-If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
+For now, you can contact me at [email protected] to submit a new request.
 """

 CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
 
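For context, each `Tasks` member above binds a task key and a metric key in a results JSON to one leaderboard column. Below is a minimal sketch of how such an entry could be resolved against a results file; the `Task` dataclass mirrors the one defined near the top of src/about.py, while the payload shape and the scores are made-up illustration, not actual ParsBench output.

```python
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str  # task_key in the results json
    metric: str     # metric_key in the results json
    col_name: str   # name to display in the leaderboard


class Tasks(Enum):
    ParsiNLU_Entailment = Task("ParsiNLU Entailment", "Exact Match", "ParsiNLU Entailment")
    Persian_Math = Task("Persian Math", "Math Equivalence", "Persian Math")


# Hypothetical results payload, shaped as {"results": {task_key: {metric_key: score}}}
results_json = {
    "results": {
        "ParsiNLU Entailment": {"Exact Match": 0.42},  # made-up score
        "Persian Math": {"Math Equivalence": 0.17},    # made-up score
    }
}

# Resolve each enum member to its score via its task and metric keys
for task in Tasks:
    score = results_json["results"][task.value.benchmark][task.value.metric]
    print(f"{task.value.col_name}: {score:.2f}")
```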
src/leaderboard/read_evals.py CHANGED
@@ -136,7 +136,7 @@ def get_request_file_for_model(requests_path, model_name, precision):
     """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED"""
     request_files = os.path.join(
         requests_path,
-        f"{model_name}_eval_request_*.json",
+        f"leaderboard/{model_name}_eval_request_*.json",
     )
     request_files = glob.glob(request_files)

@@ -156,6 +156,8 @@ def get_request_file_for_model(requests_path, model_name, precision):

 def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:
     """From the path of the results folder root, extract all needed info for results"""
+    results_path = os.path.join(results_path, "leaderboard")
+
     model_result_filepaths = []

     for root, _, files in os.walk(results_path):
@@ -187,10 +189,10 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResult]:

     results = []
     for v in eval_results.values():
-        try:
-            v.to_dict()  # we test if the dict version is complete
-            results.append(v)
-        except KeyError:  # not all eval values present
-            continue
+        # try:
+        v.to_dict()  # we test if the dict version is complete
+        results.append(v)
+        # except KeyError:  # not all eval values present
+        #     continue

     return results
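The only change to `get_request_file_for_model` is the extra `leaderboard/` path segment, so request files are now looked up one level down inside the requests dataset. A quick sketch of what the pattern expands to (the directory name and model id below are hypothetical; only the `leaderboard/` prefix comes from this commit):

```python
import glob
import os

requests_path = "eval-queue"   # hypothetical local checkout of the requests dataset
model_name = "org/some-model"  # hypothetical model id

# After this commit, the glob pattern targets the "leaderboard" subfolder.
pattern = os.path.join(requests_path, f"leaderboard/{model_name}_eval_request_*.json")
print(pattern)
# eval-queue/leaderboard/org/some-model_eval_request_*.json

request_files = glob.glob(pattern)  # all matching request files for this model
```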
 
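Note the behavioral change in the last hunk: an `EvalResult` whose `to_dict()` raises `KeyError` (a run with some metric missing) used to be silently skipped, whereas with the `try`/`except` commented out the error now propagates and aborts `get_raw_eval_results`. A self-contained sketch of the two behaviors, using a stand-in class rather than the real `EvalResult`:

```python
class FakeEvalResult:
    """Stand-in for EvalResult; only models the KeyError raised by to_dict()."""

    def __init__(self, scores: dict):
        self.scores = scores

    def to_dict(self) -> dict:
        # Like the real to_dict, fails when an expected metric is absent.
        return {"Persian Math": self.scores["Persian Math"]}


complete = FakeEvalResult({"Persian Math": 0.5})
incomplete = FakeEvalResult({})  # missing the expected metric

# Old behavior: incomplete results are dropped, the rest survive.
results = []
for v in (complete, incomplete):
    try:
        v.to_dict()  # we test if the dict version is complete
        results.append(v)
    except KeyError:  # not all eval values present
        continue
assert results == [complete]

# New behavior: the same loop without the try/except raises KeyError on
# `incomplete`, surfacing broken result files instead of hiding them.
```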