koichi12 committed
Commit 62eda46 · verified · 1 Parent(s): 52f152e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_general.yaml +5 -0
  2. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_economics.yaml +5 -0
  3. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_civics.yaml +5 -0
  4. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_history.yaml +5 -0
  5. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_islamic_studies.yaml +5 -0
  6. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_arabic_language.yaml +5 -0
  7. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_general_knowledge.yaml +5 -0
  8. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_geography.yaml +5 -0
  9. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_islamic_studies.yaml +5 -0
  10. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_social_science.yaml +5 -0
  11. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_prof_law.yaml +5 -0
  12. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_management.yaml +5 -0
  13. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_political_science.yaml +5 -0
  14. scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/utils.py +44 -0
  15. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/README.md +47 -0
  16. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/_generate_configs.py +211 -0
  17. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml +7 -0
  18. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml +7 -0
  19. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml +7 -0
  20. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml +7 -0
  21. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml +7 -0
  22. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml +7 -0
  23. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml +7 -0
  24. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml +7 -0
  25. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml +7 -0
  26. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml +7 -0
  27. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml +7 -0
  28. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml +7 -0
  29. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml +7 -0
  30. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml +7 -0
  31. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml +7 -0
  32. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml +7 -0
  33. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml +7 -0
  34. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml +7 -0
  35. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml +7 -0
  36. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_human_behavior.yaml +7 -0
  37. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml +7 -0
  38. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_introduction_to_law.yaml +7 -0
  39. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml +7 -0
  40. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml +7 -0
  41. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml +7 -0
  42. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_science_exam.yaml +7 -0
  43. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_social_studies.yaml +7 -0
  44. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_linear_algebra.yaml +7 -0
  45. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_logic_reasoning.yaml +7 -0
  46. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml +7 -0
  47. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_management_accounting.yaml +7 -0
  48. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml +7 -0
  49. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml +7 -0
  50. scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_national_protection.yaml +7 -0
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_arabic_language_general.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Arabic Language (General)"
+ "tag": "arabicmmlu_language_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_arabic_language_(general)"
+ "task_alias": "Arabic Language (General)"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_high_economics.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "High Economics"
+ "tag": "arabicmmlu_social_science_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_high_economics"
+ "task_alias": "High Economics"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_civics.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Middle Civics"
+ "tag": "arabicmmlu_social_science_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_middle_civics"
+ "task_alias": "Middle Civics"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_history.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Middle History"
+ "tag": "arabicmmlu_humanities_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_middle_history"
+ "task_alias": "Middle History"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_middle_islamic_studies.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Middle Islamic Studies"
+ "tag": "arabicmmlu_humanities_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_middle_islamic_studies"
+ "task_alias": "Middle Islamic Studies"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_arabic_language.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Primary Arabic Language"
+ "tag": "arabicmmlu_language_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_primary_arabic_language"
+ "task_alias": "Primary Arabic Language"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_general_knowledge.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Primary General Knowledge"
+ "tag": "arabicmmlu_other_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_primary_general_knowledge"
+ "task_alias": "Primary General Knowledge"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_geography.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Primary Geography"
+ "tag": "arabicmmlu_social_science_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_primary_geography"
+ "task_alias": "Primary Geography"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_islamic_studies.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Primary Islamic Studies"
+ "tag": "arabicmmlu_humanities_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_primary_islamic_studies"
+ "task_alias": "Primary Islamic Studies"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_primary_social_science.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Primary Social Science"
+ "tag": "arabicmmlu_social_science_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_primary_social_science"
+ "task_alias": "Primary Social Science"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_prof_law.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Prof Law"
+ "tag": "arabicmmlu_humanities_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_prof_law"
+ "task_alias": "Prof Law"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_management.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Univ Management"
+ "tag": "arabicmmlu_other_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_univ_management"
+ "task_alias": "Univ Management"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/arabicmmlu_univ_political_science.yaml ADDED
@@ -0,0 +1,5 @@
+ "dataset_name": "Univ Political Science"
+ "tag": "arabicmmlu_social_science_tasks"
+ "include": "_default_arabicmmlu_template_yaml"
+ "task": "arabicmmlu_univ_political_science"
+ "task_alias": "Univ Political Science"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/arabicmmlu/utils.py ADDED
@@ -0,0 +1,44 @@
+ PROMPT = "This is a {}. Select the correct answer!\n\nQuestion: {}\n{}\n\nAnswer:"
+
+ level_en = {
+     "Primary": "primary school",
+     "Middle": "middle school",
+     "High": "high school",
+     "Univ": "university",
+     "Prof": "professional",
+ }
+
+ alpa = ["A.", "B.", "C.", "D.", "E."]
+
+
+ def doc_to_text(doc):
+     """
+     Refactoring `prepare_data_en` to fit with the lm harness framework.
+     https://github.com/mbzuai-nlp/ArabicMMLU/blob/main/util_prompt.py
+     """
+
+     level = "" if not doc["Level"] else " for " + level_en[doc["Level"]]
+     country = "" if not doc["Country"] else " in " + doc["Country"]
+     main_meta_data = f"{doc['Subject']} question{level}{country}"
+
+     question = (
+         doc["Question"]
+         if doc["Context"] == ""
+         else f"{doc['Context']}\n\n{doc['Question']}"
+     )
+
+     options = []
+     for i, opt in enumerate(
+         ["Option 1", "Option 2", "Option 3", "Option 4", "Option 5"]
+     ):
+         if not doc[opt]:
+             break
+         options.append(f"{alpa[i]} {doc[opt]}")
+
+     doc_text = PROMPT.format(main_meta_data, question, "\n".join(options))
+
+     return doc_text
+
+
+ def doc_to_choice(doc):
+     return [alpa[i][0] for i in range(5) if doc[f"Option {i+1}"]]
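For orientation while reading the diff, the following sketch (not part of the commit) illustrates what `doc_to_text` and `doc_to_choice` above produce for an invented ArabicMMLU-style record; every field value is hypothetical, and the field names simply mirror the dataset columns the functions read.

```python
# Hypothetical record; field names follow the columns read by doc_to_text above.
sample_doc = {
    "Level": "High",
    "Country": "Jordan",
    "Subject": "Economics",
    "Context": "",
    "Question": "What does GDP measure?",
    "Option 1": "Total value of goods and services produced",
    "Option 2": "Total government debt",
    "Option 3": "Average household income",
    "Option 4": "",
    "Option 5": "",
}

# doc_to_text(sample_doc) would return (verbatim, including the article the
# template produces):
#   This is a Economics question for high school in Jordan. Select the correct answer!
#
#   Question: What does GDP measure?
#   A. Total value of goods and services produced
#   B. Total government debt
#   C. Average household income
#
#   Answer:
#
# doc_to_choice(sample_doc) would return ["A", "B", "C"].
```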
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/README.md ADDED
@@ -0,0 +1,47 @@
+ # TMMLU+
+
+ ### Paper
+
+ Title: `An Improved Traditional Chinese Evaluation Suite for Foundation Model`
+
+ Abstract: `We present TMMLU+, a comprehensive dataset designed for the Traditional Chinese massive multitask language understanding dataset. TMMLU+ is a multiple-choice question-answering dataset with 66 subjects from elementary to professional level. Compared to its predecessor, TMMLU, TMMLU+ is six times larger and boasts a more balanced subject distribution. We included benchmark results in TMMLU+ from closed-source models and 24 open-weight Chinese large language models of parameters ranging from 1.8B to 72B. Our findings reveal that Traditional Chinese models still trail behind their Simplified Chinese counterparts. Additionally, current large language models have yet to outperform human performance in average scores. We publicly release our dataset and the corresponding benchmark source code.`
+
+
+ Homepage: [https://huggingface.co/datasets/ikala/tmmluplus](https://huggingface.co/datasets/ikala/tmmluplus)
+
+
+ ### Citation
+
+ ```
+ @article{ikala2024improved,
+   title={An Improved Traditional Chinese Evaluation Suite for Foundation Model},
+   author={Tam, Zhi-Rui and Pai, Ya-Ting and Lee, Yen-Wei and Cheng, Sega and Shuai, Hong-Han},
+   journal={arXiv preprint arXiv:2403.01858},
+   year={2024}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `tmmluplus`: `The dataset comprises 22,690 multiple-choice questions from 66 subjects ranging from primary to professional level.`
+
+ #### Tasks
+
+ The following tasks evaluate subjects in the TMMLU+ dataset using loglikelihood-based multiple-choice scoring:
+
+ * `tmmluplus_{subject_english}`
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+   * [x] Have you referenced the original paper that introduced the task?
+   * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
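As a usage note (not part of the commit), the subject tasks listed in the README can be run through the harness; the sketch below assumes the standard lm-evaluation-harness `simple_evaluate` Python entry point, and the model name and arguments are placeholders only.

```python
# Minimal sketch, assuming the standard lm-evaluation-harness Python API;
# the pretrained model below is only a placeholder.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",    # placeholder model
    tasks=["tmmluplus_accounting"],  # any tmmluplus_{subject_english} task
    num_fewshot=5,
)
print(results["results"]["tmmluplus_accounting"])
```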
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/_generate_configs.py ADDED
@@ -0,0 +1,211 @@
+ """
+ Take in a YAML, and output all "other" splits with this YAML
+ """
+
+ import argparse
+ import os
+
+ import pandas as pd
+ import yaml
+ from tqdm import tqdm
+
+
+ # Copy from https://github.com/iKala/ievals/blob/main/ievals/settings.py
+ # from TMMLU+ official example
+ categories = {
+     "STEM": [
+         "physics",
+         "chemistry",
+         "biology",
+         "computer science",
+         "math",
+         "engineering",
+     ],
+     "humanities": ["history", "philosophy", "law"],
+     "social_sciences": [
+         "politics",
+         "culture",
+         "economics",
+         "geography",
+         "psychology",
+         "education",
+     ],
+     "other": ["other", "business", "health"],  # (business, health, misc.)
+ }
+
+ task_list = [
+     "engineering_math",
+     "dentistry",
+     "traditional_chinese_medicine_clinical_medicine",
+     "clinical_psychology",
+     "technical",
+     "culinary_skills",
+     "mechanical",
+     "logic_reasoning",
+     "real_estate",
+     "general_principles_of_law",
+     "finance_banking",
+     "anti_money_laundering",
+     "ttqav2",
+     "marketing_management",
+     "business_management",
+     "organic_chemistry",
+     "advance_chemistry",
+     "physics",
+     "secondary_physics",
+     "human_behavior",
+     "national_protection",
+     "jce_humanities",
+     "politic_science",
+     "agriculture",
+     "official_document_management",
+     "financial_analysis",
+     "pharmacy",
+     "educational_psychology",
+     "statistics_and_machine_learning",
+     "management_accounting",
+     "introduction_to_law",
+     "computer_science",
+     "veterinary_pathology",
+     "accounting",
+     "fire_science",
+     "optometry",
+     "insurance_studies",
+     "pharmacology",
+     "taxation",
+     "education_(profession_level)",
+     "economics",
+     "veterinary_pharmacology",
+     "nautical_science",
+     "occupational_therapy_for_psychological_disorders",
+     "trust_practice",
+     "geography_of_taiwan",
+     "physical_education",
+     "auditing",
+     "administrative_law",
+     "basic_medical_science",
+     "macroeconomics",
+     "trade",
+     "chinese_language_and_literature",
+     "tve_design",
+     "junior_science_exam",
+     "junior_math_exam",
+     "junior_chinese_exam",
+     "junior_social_studies",
+     "tve_mathematics",
+     "tve_chinese_language",
+     "tve_natural_sciences",
+     "junior_chemistry",
+     "music",
+     "education",
+     "three_principles_of_people",
+     "taiwanese_hokkien",
+ ]
+ subject2name = {}
+ # subject2category = {}
+ SUBJECTS = {}
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--base_yaml_path", required=True)
+     parser.add_argument("--save_prefix_path", default="tmmluplus")
+     parser.add_argument("--cot_prompt_path", default=None)
+     parser.add_argument("--task_prefix", default="")
+     parser.add_argument("--group_prefix", default="")
+     parser.add_argument("--subject_file", default="subject.tsv")
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+     from pathlib import Path
+
+     # Initialization
+     SUBJECT_FILE = Path(__file__).parent / Path(args.subject_file)
+
+     df = pd.read_csv(SUBJECT_FILE, delimiter="\t")
+
+     for _, row in df.iterrows():
+         for _c in categories:
+             if row["subject"] in SUBJECTS:
+                 raise ValueError("Duplicate tasks.")
+             if row["category"] in categories[_c]:  # append new item into SUBJECTS
+                 SUBJECTS[row["subject"]] = _c
+                 subject2name[row["subject"]] = row["name"]
+                 break
+     # End of SUBJECTS initialization
+
+     # get filename of base_yaml so we can `"include": ` it in our "other" YAMLs.
+     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
+     with open(args.base_yaml_path) as f:
+         base_yaml = yaml.full_load(f)
+
+     if args.cot_prompt_path is not None:
+         import json
+
+         with open(args.cot_prompt_path) as f:
+             cot_file = json.load(f)
+
+     ALL_CATEGORIES = []
+     for subject, category in tqdm(SUBJECTS.items()):
+         if category not in ALL_CATEGORIES:
+             ALL_CATEGORIES.append(category)
+
+         if args.cot_prompt_path is not None:
+             description = cot_file[subject]
+         else:
+             name_of_subject = subject2name[subject].replace("_", " ")
+             description = f"以下為{name_of_subject}的單選題,請提供正確答案的選項。\n\n"
+             # description = f"The following are multiple choice questions (with answers) about {' '.join(subject.split('_'))}.\n\n"
+
+         yaml_dict = {
+             "include": base_yaml_name,
+             "group": f"tmmluplus_{args.task_prefix}_{category}"
+             if args.task_prefix != ""
+             else f"tmmluplus_{category}",
+             "group_alias": category.replace("_", " "),
+             "task": f"tmmluplus_{args.task_prefix}_{subject}"
+             if args.task_prefix != ""
+             else f"tmmluplus_{subject}",
+             "task_alias": subject.replace("_", " "),
+             "dataset_name": subject,
+             "description": description,
+         }
+
+         file_save_path = args.save_prefix_path + f"_{subject}.yaml"
+         # eval_logger.info(f"Saving yaml for subset {subject} to {file_save_path}")
+         with open(file_save_path, "w") as yaml_file:
+             yaml.dump(
+                 yaml_dict,
+                 yaml_file,
+                 # width=float("inf"),
+                 allow_unicode=True,
+                 default_style='"',
+             )
+
+     if args.task_prefix != "":
+         mmlu_subcategories = [
+             f"tmmluplus_{args.task_prefix}_{category}" for category in ALL_CATEGORIES
+         ]
+     else:
+         mmlu_subcategories = [f"tmmluplus_{category}" for category in ALL_CATEGORIES]
+
+     if args.group_prefix != "":
+         file_save_path = args.group_prefix + ".yaml"
+     else:
+         file_save_path = args.save_prefix_path + ".yaml"
+
+     # eval_logger.info(f"Saving benchmark config to {file_save_path}")
+     with open(file_save_path, "w") as yaml_file:
+         yaml.dump(
+             {
+                 "group": f"tmmluplus_{args.task_prefix}"
+                 if args.task_prefix != ""
+                 else "tmmluplus",
+                 "task": mmlu_subcategories,
+             },
+             yaml_file,
+             indent=4,
+             default_flow_style=False,
+         )
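To make the generator's output concrete, the sketch below (not part of the commit) rebuilds the per-subject `yaml_dict` for one hypothetical `subject.tsv` row and dumps it the way the script does; the resulting keys and values correspond to the checked-in `tmmluplus_accounting.yaml` that follows.

```python
# Minimal sketch of the per-subject config the generator writes, assuming a
# hypothetical subject.tsv row with subject="accounting", a category that maps
# to the coarse group "other", and name="會計學".
import yaml

subject, category, name = "accounting", "other", "會計學"
yaml_dict = {
    "include": "_default_template_yaml",
    "group": f"tmmluplus_{category}",
    "group_alias": category.replace("_", " "),
    "task": f"tmmluplus_{subject}",
    "task_alias": subject.replace("_", " "),
    "dataset_name": subject,
    "description": f"以下為{name}的單選題,請提供正確答案的選項。\n\n",
}
# yaml.dump sorts keys alphabetically, which is why the checked-in files
# list dataset_name first and task_alias last.
print(yaml.dump(yaml_dict, allow_unicode=True, default_style='"'))
```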
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "accounting"
+ "description": "以下為會計學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_accounting"
+ "task_alias": "accounting"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "administrative_law"
+ "description": "以下為行政法的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_humanities"
+ "group_alias": "humanities"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_administrative_law"
+ "task_alias": "administrative law"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "agriculture"
+ "description": "以下為農業的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_agriculture"
+ "task_alias": "agriculture"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "anti_money_laundering"
+ "description": "以下為洗錢防制的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_humanities"
+ "group_alias": "humanities"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_anti_money_laundering"
+ "task_alias": "anti money laundering"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "auditing"
+ "description": "以下為審計學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_auditing"
+ "task_alias": "auditing"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "basic_medical_science"
+ "description": "以下為基礎醫學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_basic_medical_science"
+ "task_alias": "basic medical science"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "business_management"
+ "description": "以下為企業管理的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_business_management"
+ "task_alias": "business management"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "chinese_language_and_literature"
+ "description": "以下為國文的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_chinese_language_and_literature"
+ "task_alias": "chinese language and literature"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "clinical_psychology"
+ "description": "以下為臨床心理學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_clinical_psychology"
+ "task_alias": "clinical psychology"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "computer_science"
+ "description": "以下為資訊工程的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_computer_science"
+ "task_alias": "computer science"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "dentistry"
+ "description": "以下為牙醫學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_dentistry"
+ "task_alias": "dentistry"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "economics"
+ "description": "以下為經濟學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_economics"
+ "task_alias": "economics"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "education"
+ "description": "以下為教育常識的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_education"
+ "task_alias": "education"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "education_(profession_level)"
+ "description": "以下為教育專業的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_education_(profession_level)"
+ "task_alias": "education (profession level)"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "educational_psychology"
+ "description": "以下為教育心理的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_educational_psychology"
+ "task_alias": "educational psychology"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "engineering_math"
+ "description": "以下為工程數學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_engineering_math"
+ "task_alias": "engineering math"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "finance_banking"
+ "description": "以下為金融與法規的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_finance_banking"
+ "task_alias": "finance banking"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "financial_analysis"
+ "description": "以下為財務分析的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_financial_analysis"
+ "task_alias": "financial analysis"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "fire_science"
+ "description": "以下為火災學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_fire_science"
+ "task_alias": "fire science"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_human_behavior.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "human_behavior"
+ "description": "以下為人類行為與社會的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_human_behavior"
+ "task_alias": "human behavior"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "insurance_studies"
+ "description": "以下為保險學的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_insurance_studies"
+ "task_alias": "insurance studies"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_introduction_to_law.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "introduction_to_law"
+ "description": "以下為法律概論的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_humanities"
+ "group_alias": "humanities"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_introduction_to_law"
+ "task_alias": "introduction to law"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "junior_chemistry"
+ "description": "以下為國中理化的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_junior_chemistry"
+ "task_alias": "junior chemistry"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "junior_chinese_exam"
+ "description": "以下為國中會考基測國文的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_junior_chinese_exam"
+ "task_alias": "junior chinese exam"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "junior_math_exam"
+ "description": "以下為國中會考基測數學科的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_junior_math_exam"
+ "task_alias": "junior math exam"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_science_exam.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "junior_science_exam"
+ "description": "以下為國中會考基測自然科的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_junior_science_exam"
+ "task_alias": "junior science exam"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_junior_social_studies.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "junior_social_studies"
+ "description": "以下為國中會考基測社會科的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_junior_social_studies"
+ "task_alias": "junior social studies"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_linear_algebra.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "linear_algebra"
+ "description": "以下為線代的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_STEM"
+ "group_alias": "STEM"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_linear_algebra"
+ "task_alias": "linear algebra"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_logic_reasoning.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "logic_reasoning"
+ "description": "以下為邏輯思維的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_logic_reasoning"
+ "task_alias": "logic reasoning"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "macroeconomics"
+ "description": "以下為總經的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_macroeconomics"
+ "task_alias": "macroeconomics"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_management_accounting.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "management_accounting"
+ "description": "以下為管理會計的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_management_accounting"
+ "task_alias": "management accounting"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "marketing_management"
+ "description": "以下為行銷管理的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_marketing_management"
+ "task_alias": "marketing management"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "music"
+ "description": "以下為音樂科的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_other"
+ "group_alias": "other"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_music"
+ "task_alias": "music"
scripts/yans/lm-evaluation-harness/lm_eval/tasks/tmmluplus/default/tmmluplus_national_protection.yaml ADDED
@@ -0,0 +1,7 @@
+ "dataset_name": "national_protection"
+ "description": "以下為軍事的單選題,請提供正確答案的選項。\n\n"
+ "group": "tmmluplus_social_sciences"
+ "group_alias": "social sciences"
+ "include": "_default_template_yaml"
+ "task": "tmmluplus_national_protection"
+ "task_alias": "national protection"