diff --git a/ABX-AI__Silver-Sun-v2-11B/.ipynb_checkpoints/results_2024-07-02T00-46-34.040470-checkpoint.json b/ABX-AI__Silver-Sun-v2-11B/.ipynb_checkpoints/results_2024-07-02T00-46-34.040470-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..f4aa755337eacbca946c89d72f94012bd469b3db
--- /dev/null
+++ b/ABX-AI__Silver-Sun-v2-11B/.ipynb_checkpoints/results_2024-07-02T00-46-34.040470-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6798446524596694,
+ "acc_stderr,none": 0.0046558259808919715,
+ "acc_norm,none": 0.8639713204540929,
+ "acc_norm_stderr,none": 0.003421183909320265,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 69.92376298818061,
+ "eqbench_stderr,none": 2.302476504280005,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=ABX-AI/Silver-Sun-v2-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "052f5514e25a2c0d3622f2aa84c9662ebca41eba",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719878990.231564,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "ABX-AI/Silver-Sun-v2-11B",
+ "model_name_sanitized": "ABX-AI__Silver-Sun-v2-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 90285.382371444,
+ "end_time": 92495.969871496,
+ "total_evaluation_time_seconds": "2210.5875000520027"
+}
\ No newline at end of file
diff --git a/ABX-AI__Silver-Sun-v2-11B/results_2024-07-02T00-46-34.040470.json b/ABX-AI__Silver-Sun-v2-11B/results_2024-07-02T00-46-34.040470.json
new file mode 100644
index 0000000000000000000000000000000000000000..f4aa755337eacbca946c89d72f94012bd469b3db
--- /dev/null
+++ b/ABX-AI__Silver-Sun-v2-11B/results_2024-07-02T00-46-34.040470.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6798446524596694,
+ "acc_stderr,none": 0.0046558259808919715,
+ "acc_norm,none": 0.8639713204540929,
+ "acc_norm_stderr,none": 0.003421183909320265,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 69.92376298818061,
+ "eqbench_stderr,none": 2.302476504280005,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=ABX-AI/Silver-Sun-v2-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "052f5514e25a2c0d3622f2aa84c9662ebca41eba",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719878990.231564,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "ABX-AI/Silver-Sun-v2-11B",
+ "model_name_sanitized": "ABX-AI__Silver-Sun-v2-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 90285.382371444,
+ "end_time": 92495.969871496,
+ "total_evaluation_time_seconds": "2210.5875000520027"
+}
\ No newline at end of file
diff --git a/BlueNipples__SnowLotus-v2-10.7B/.ipynb_checkpoints/results_2024-07-01T22-45-32.913168-checkpoint.json b/BlueNipples__SnowLotus-v2-10.7B/.ipynb_checkpoints/results_2024-07-01T22-45-32.913168-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..6720dd66ada4da84b08543f17582761c70ee4d3e
--- /dev/null
+++ b/BlueNipples__SnowLotus-v2-10.7B/.ipynb_checkpoints/results_2024-07-01T22-45-32.913168-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6488747261501693,
+ "acc_stderr,none": 0.004763465139038578,
+ "acc_norm,none": 0.834196375224059,
+ "acc_norm_stderr,none": 0.0037114419828661186,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 60.53953285457271,
+ "eqbench_stderr,none": 2.6096565956249744,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374273,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=BlueNipples/SnowLotus-v2-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "e655cc010d33dca9a7ac37a640ea45b82fef054a",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719871605.6418686,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "BlueNipples/SnowLotus-v2-10.7B",
+ "model_name_sanitized": "BlueNipples__SnowLotus-v2-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 82900.70551701,
+ "end_time": 85234.842583591,
+ "total_evaluation_time_seconds": "2334.1370665809955"
+}
\ No newline at end of file
diff --git a/BlueNipples__SnowLotus-v2-10.7B/results_2024-07-01T22-45-32.913168.json b/BlueNipples__SnowLotus-v2-10.7B/results_2024-07-01T22-45-32.913168.json
new file mode 100644
index 0000000000000000000000000000000000000000..6720dd66ada4da84b08543f17582761c70ee4d3e
--- /dev/null
+++ b/BlueNipples__SnowLotus-v2-10.7B/results_2024-07-01T22-45-32.913168.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6488747261501693,
+ "acc_stderr,none": 0.004763465139038578,
+ "acc_norm,none": 0.834196375224059,
+ "acc_norm_stderr,none": 0.0037114419828661186,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 60.53953285457271,
+ "eqbench_stderr,none": 2.6096565956249744,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374273,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=BlueNipples/SnowLotus-v2-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "e655cc010d33dca9a7ac37a640ea45b82fef054a",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719871605.6418686,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "BlueNipples/SnowLotus-v2-10.7B",
+ "model_name_sanitized": "BlueNipples__SnowLotus-v2-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 82900.70551701,
+ "end_time": 85234.842583591,
+ "total_evaluation_time_seconds": "2334.1370665809955"
+}
\ No newline at end of file
diff --git a/Crimvael__Raphael-7B/.ipynb_checkpoints/results_2024-07-02T03-45-26.455365-checkpoint.json b/Crimvael__Raphael-7B/.ipynb_checkpoints/results_2024-07-02T03-45-26.455365-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..097dd03930ed50aab8fc68b2875ed756d9294102
--- /dev/null
+++ b/Crimvael__Raphael-7B/.ipynb_checkpoints/results_2024-07-02T03-45-26.455365-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6527584146584345,
+ "acc_stderr,none": 0.004751203378888043,
+ "acc_norm,none": 0.8346942840071699,
+ "acc_norm_stderr,none": 0.0037069708564110657,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.72823492962466,
+ "eqbench_stderr,none": 2.1836213516902125,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Crimvael/Raphael-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c5a262bacfaf9f4a822954223462f428f7cb6290",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719890419.8683555,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Crimvael/Raphael-7B",
+ "model_name_sanitized": "Crimvael__Raphael-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 101714.859551874,
+ "end_time": 103228.384796742,
+ "total_evaluation_time_seconds": "1513.5252448679967"
+}
\ No newline at end of file
diff --git a/Crimvael__Raphael-7B/results_2024-07-02T03-45-26.455365.json b/Crimvael__Raphael-7B/results_2024-07-02T03-45-26.455365.json
new file mode 100644
index 0000000000000000000000000000000000000000..097dd03930ed50aab8fc68b2875ed756d9294102
--- /dev/null
+++ b/Crimvael__Raphael-7B/results_2024-07-02T03-45-26.455365.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6527584146584345,
+ "acc_stderr,none": 0.004751203378888043,
+ "acc_norm,none": 0.8346942840071699,
+ "acc_norm_stderr,none": 0.0037069708564110657,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.72823492962466,
+ "eqbench_stderr,none": 2.1836213516902125,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Crimvael/Raphael-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c5a262bacfaf9f4a822954223462f428f7cb6290",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719890419.8683555,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Crimvael/Raphael-7B",
+ "model_name_sanitized": "Crimvael__Raphael-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 101714.859551874,
+ "end_time": 103228.384796742,
+ "total_evaluation_time_seconds": "1513.5252448679967"
+}
\ No newline at end of file
diff --git a/Delcos__Mistral-Pygmalion-7b/.ipynb_checkpoints/results_2024-07-02T07-11-52.058605-checkpoint.json b/Delcos__Mistral-Pygmalion-7b/.ipynb_checkpoints/results_2024-07-02T07-11-52.058605-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..94bfe2d967eb68aa807a5e02f0b336f735728bf0
--- /dev/null
+++ b/Delcos__Mistral-Pygmalion-7b/.ipynb_checkpoints/results_2024-07-02T07-11-52.058605-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.571400119498108,
+ "acc_stderr,none": 0.004938643787869521,
+ "acc_norm,none": 0.7660824536944831,
+ "acc_norm_stderr,none": 0.004224552134436904,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 17.344080969420517,
+ "eqbench_stderr,none": 3.6133795461572396,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Delcos/Mistral-Pygmalion-7b,trust_remote_code=True",
+ "model_num_parameters": 6738415616,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "2cf8706d62541ba6d647562055cdc08bc70500a1",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 32
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719902850.114841,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Delcos/Mistral-Pygmalion-7b",
+ "model_name_sanitized": "Delcos__Mistral-Pygmalion-7b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 114145.16115359,
+ "end_time": 115613.988031289,
+ "total_evaluation_time_seconds": "1468.8268776990008"
+}
\ No newline at end of file
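Because every payload in this diff shares the same schema, the headline numbers can be tabulated mechanically rather than read out of each file. A minimal sketch, assuming the directory layout implied by the diff headers (one directory per sanitized model name) and files that follow the eq_bench + hellaswag schema:

```python
import json
from pathlib import Path

# Summarize the headline metrics from the result files added in this diff.
# The glob pattern is an assumption about the repository layout.
for path in sorted(Path(".").glob("*/results_*.json")):
    data = json.loads(path.read_text())
    res = data["results"]
    print(
        data["model_name"],
        f"hellaswag acc_norm={res['hellaswag']['acc_norm,none']:.4f}",
        f"eq_bench={res['eq_bench']['eqbench,none']:.2f}",
        f"parseable={res['eq_bench']['percent_parseable,none']:.0f}%",
    )
```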
diff --git a/Delcos__Mistral-Pygmalion-7b/results_2024-07-02T07-11-52.058605.json b/Delcos__Mistral-Pygmalion-7b/results_2024-07-02T07-11-52.058605.json
new file mode 100644
index 0000000000000000000000000000000000000000..94bfe2d967eb68aa807a5e02f0b336f735728bf0
--- /dev/null
+++ b/Delcos__Mistral-Pygmalion-7b/results_2024-07-02T07-11-52.058605.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.571400119498108,
+ "acc_stderr,none": 0.004938643787869521,
+ "acc_norm,none": 0.7660824536944831,
+ "acc_norm_stderr,none": 0.004224552134436904,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 17.344080969420517,
+ "eqbench_stderr,none": 3.6133795461572396,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Delcos/Mistral-Pygmalion-7b,trust_remote_code=True",
+ "model_num_parameters": 6738415616,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "2cf8706d62541ba6d647562055cdc08bc70500a1",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 32
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719902850.114841,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Delcos/Mistral-Pygmalion-7b",
+ "model_name_sanitized": "Delcos__Mistral-Pygmalion-7b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 114145.16115359,
+ "end_time": 115613.988031289,
+ "total_evaluation_time_seconds": "1468.8268776990008"
+}
\ No newline at end of file
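The hellaswag config serializes process_docs but not the preprocess helper it calls, which lives elsewhere in the harness. The sketch below pairs the serialized logic with an approximation of that helper (its exact rules are an assumption here); the raw document is illustrative.

```python
import re

def preprocess(text: str) -> str:
    # Approximation of the harness's HellaSwag cleanup helper, which the
    # serialized process_docs above calls but does not include.
    text = text.strip()
    text = text.replace(" [title]", ". ")  # WikiHow-style section markers
    text = re.sub(r"\[.*?\]", "", text)    # drop any remaining bracket tags
    return text.replace("  ", " ")

# Illustrative raw document in the schema process_docs expects.
doc = {
    "activity_label": "Removing ice from car",
    "ctx_a": "Then, the man writes over the snow covering the window of a car,",
    "ctx_b": "and a woman wearing winter clothes smiles.",
    "endings": ["ending one", "ending two", "ending three", "ending four"],
    "label": "3",
}
ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
out_doc = {
    "query": preprocess(doc["activity_label"] + ": " + ctx),
    "choices": [preprocess(e) for e in doc["endings"]],
    "gold": int(doc["label"]),
}
print(out_doc["query"])
```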
diff --git a/FallenMerick__Chewy-Lemon-Cookie-11B/.ipynb_checkpoints/results_2024-07-01T19-12-13.115090-checkpoint.json b/FallenMerick__Chewy-Lemon-Cookie-11B/.ipynb_checkpoints/results_2024-07-01T19-12-13.115090-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..29ae7a7252649ddbf9a05ab6bdcbd6f3c30bd9a2
--- /dev/null
+++ b/FallenMerick__Chewy-Lemon-Cookie-11B/.ipynb_checkpoints/results_2024-07-01T19-12-13.115090-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6618203545110536,
+ "acc_stderr,none": 0.004721231637092694,
+ "acc_norm,none": 0.843855805616411,
+ "acc_norm_stderr,none": 0.0036225013703318895,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 76.2370547740446,
+ "eqbench_stderr,none": 1.7148952450726893,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Chewy-Lemon-Cookie-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "0f5d0d6d218b3ef034f58eba32d6fe7ac4c237ae",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719858887.6024628,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Chewy-Lemon-Cookie-11B",
+ "model_name_sanitized": "FallenMerick__Chewy-Lemon-Cookie-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 70182.562797015,
+ "end_time": 72435.044500542,
+ "total_evaluation_time_seconds": "2252.48170352701"
+}
\ No newline at end of file
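Both hellaswag metrics come from the same per-choice log-likelihoods: acc takes the argmax of the raw totals, while acc_norm (as implemented in the harness, to the best of my reading) first divides each choice's log-likelihood by its byte length, removing the bias toward short endings; this is why acc_norm runs roughly 18-20 points above acc throughout these files. A toy illustration of the two selection rules:

```python
# Toy illustration of acc vs acc_norm selection for one multiple-choice doc.
# The endings and log-likelihoods are made-up numbers, not model output.
choices = ["a short ending.", "a much longer and more detailed ending."]
loglikelihoods = [-12.0, -18.0]
gold = 1

# acc: argmax of raw total log-likelihood (favors the short ending).
pred = max(range(len(choices)), key=lambda i: loglikelihoods[i])
# acc_norm: argmax after normalizing by continuation length in bytes.
pred_norm = max(
    range(len(choices)),
    key=lambda i: loglikelihoods[i] / len(choices[i].encode("utf-8")),
)
print("acc:", int(pred == gold), "acc_norm:", int(pred_norm == gold))  # 0 1
```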
diff --git a/FallenMerick__Chewy-Lemon-Cookie-11B/results_2024-07-01T19-12-13.115090.json b/FallenMerick__Chewy-Lemon-Cookie-11B/results_2024-07-01T19-12-13.115090.json
new file mode 100644
index 0000000000000000000000000000000000000000..29ae7a7252649ddbf9a05ab6bdcbd6f3c30bd9a2
--- /dev/null
+++ b/FallenMerick__Chewy-Lemon-Cookie-11B/results_2024-07-01T19-12-13.115090.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6618203545110536,
+ "acc_stderr,none": 0.004721231637092694,
+ "acc_norm,none": 0.843855805616411,
+ "acc_norm_stderr,none": 0.0036225013703318895,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 76.2370547740446,
+ "eqbench_stderr,none": 1.7148952450726893,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Chewy-Lemon-Cookie-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "0f5d0d6d218b3ef034f58eba32d6fe7ac4c237ae",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719858887.6024628,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Chewy-Lemon-Cookie-11B",
+ "model_name_sanitized": "FallenMerick__Chewy-Lemon-Cookie-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 70182.562797015,
+ "end_time": 72435.044500542,
+ "total_evaluation_time_seconds": "2252.48170352701"
+}
\ No newline at end of file
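The *_stderr fields are the standard error of a sample mean, which for a binary metric over the 10,042 HellaSwag validation docs is essentially sqrt(p(1-p)/n) (up to the n versus n-1 convention in the sample variance). This can be checked directly against the Chewy-Lemon-Cookie-11B figures above:

```python
import math

# Check the reported HellaSwag standard errors against the binomial
# standard error of a mean over n validation docs.
n = 10042
for p, reported in [
    (0.6618203545110536, 0.004721231637092694),   # acc
    (0.843855805616411, 0.0036225013703318895),   # acc_norm
]:
    se = math.sqrt(p * (1 - p) / n)
    # Agrees with the reported value to within rounding and the n-1 convention.
    print(f"p={p:.4f}  computed={se:.6f}  reported={reported:.6f}")
```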
diff --git a/FallenMerick__Chunky-Lemon-Cookie-11B/.ipynb_checkpoints/results_2024-07-01T18-34-32.911166-checkpoint.json b/FallenMerick__Chunky-Lemon-Cookie-11B/.ipynb_checkpoints/results_2024-07-01T18-34-32.911166-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa8d98c13e5254fda6c747e35f4ef56acfd5e227
--- /dev/null
+++ b/FallenMerick__Chunky-Lemon-Cookie-11B/.ipynb_checkpoints/results_2024-07-01T18-34-32.911166-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6622186815375424,
+ "acc_stderr,none": 0.004719870074967253,
+ "acc_norm,none": 0.8435570603465445,
+ "acc_norm_stderr,none": 0.0036253232211662535,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 76.2907953282312,
+ "eqbench_stderr,none": 1.7296444904041766,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Chunky-Lemon-Cookie-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "849afd37a9995d7a88503e4ef4847c5d9d239e2a",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719856428.7962418,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Chunky-Lemon-Cookie-11B",
+ "model_name_sanitized": "FallenMerick__Chunky-Lemon-Cookie-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 67723.856639362,
+ "end_time": 70174.840569089,
+ "total_evaluation_time_seconds": "2450.983929727008"
+}
\ No newline at end of file
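The eq_bench generation settings recorded in these configs amount to greedy decoding, capped at 80 new tokens and stopped at the first blank line. Expressed with plain Hugging Face transformers (the harness's own loop differs in batching and native stop-sequence handling, so this is only an approximate sketch; the prompt is a placeholder):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Sketch of the recorded settings: do_sample=false, temperature 0.0,
# max_gen_toks 80, until ["\n\n"].
model_id = "FallenMerick/Chunky-Lemon-Cookie-11B"
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto")

prompt = "..."  # an EQ-Bench prompt from pbevan11/EQ-Bench
inputs = tok(prompt, return_tensors="pt")
out = model.generate(**inputs, do_sample=False, max_new_tokens=80)
text = tok.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
completion = text.split("\n\n")[0]  # emulate the "\n\n" stop sequence
```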
diff --git a/FallenMerick__Chunky-Lemon-Cookie-11B/results_2024-07-01T18-34-32.911166.json b/FallenMerick__Chunky-Lemon-Cookie-11B/results_2024-07-01T18-34-32.911166.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa8d98c13e5254fda6c747e35f4ef56acfd5e227
--- /dev/null
+++ b/FallenMerick__Chunky-Lemon-Cookie-11B/results_2024-07-01T18-34-32.911166.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6622186815375424,
+ "acc_stderr,none": 0.004719870074967253,
+ "acc_norm,none": 0.8435570603465445,
+ "acc_norm_stderr,none": 0.0036253232211662535,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 76.2907953282312,
+ "eqbench_stderr,none": 1.7296444904041766,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Chunky-Lemon-Cookie-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "849afd37a9995d7a88503e4ef4847c5d9d239e2a",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719856428.7962418,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Chunky-Lemon-Cookie-11B",
+ "model_name_sanitized": "FallenMerick__Chunky-Lemon-Cookie-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 67723.856639362,
+ "end_time": 70174.840569089,
+ "total_evaluation_time_seconds": "2450.983929727008"
+}
\ No newline at end of file
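As the index lines show (e.g. 94bfe2d9... for both Delcos files, 29ae7a72... for both Chewy-Lemon-Cookie files, and aa8d98c1... for both Chunky-Lemon-Cookie files), each run is committed twice: once as a Jupyter .ipynb_checkpoints copy and once as the real file, with identical blob hashes. A sketch that reproduces git's blob hashing to flag such byte-identical duplicates:

```python
import hashlib
from pathlib import Path

# git hashes a blob as sha1(b"blob <size>\0" + content); recomputing it
# locally confirms the checkpoint copies are byte-identical duplicates.
def git_blob_sha1(path: Path) -> str:
    data = path.read_bytes()
    return hashlib.sha1(b"blob %d\0" % len(data) + data).hexdigest()

seen = {}
for path in sorted(Path(".").rglob("results_*.json")):
    digest = git_blob_sha1(path)
    if digest in seen:
        print(f"duplicate: {path} == {seen[digest]} ({digest[:12]})")
    else:
        seen[digest] = path
```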
diff --git a/FallenMerick__Iced-Lemon-Cookie-7B/.ipynb_checkpoints/results_2024-06-29T01-06-21.272851-checkpoint.json b/FallenMerick__Iced-Lemon-Cookie-7B/.ipynb_checkpoints/results_2024-06-29T01-06-21.272851-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..f70e6cb7d4fbce6e8712b995b7155042a52477e1
--- /dev/null
+++ b/FallenMerick__Iced-Lemon-Cookie-7B/.ipynb_checkpoints/results_2024-06-29T01-06-21.272851-checkpoint.json
@@ -0,0 +1,4035 @@
+{
+ "results": {
+ "Open LLM Leaderboard": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "exact_match,strict-match": 0.6277482941622441,
+ "exact_match_stderr,strict-match": 0.013315375362565036,
+ "acc_norm,none": 0.8359193864811842,
+ "acc_norm_stderr,none": 0.00345585439013916,
+ "exact_match,flexible-extract": 0.6315390447308568,
+ "exact_match_stderr,flexible-extract": 0.013287342651674573,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.6514042969190568,
+ "acc_stderr,none": 0.0027599234131481932,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "alias": "Open LLM Leaderboard"
+ },
+ "arc_challenge": {
+ "acc,none": 0.6382252559726962,
+ "acc_stderr,none": 0.014041957945038085,
+ "acc_norm,none": 0.6689419795221843,
+ "acc_norm_stderr,none": 0.013752062419817841,
+ "alias": " - arc_challenge"
+ },
+ "gsm8k": {
+ "exact_match,strict-match": 0.6277482941622441,
+ "exact_match_stderr,strict-match": 0.013315375362565036,
+ "exact_match,flexible-extract": 0.6315390447308568,
+ "exact_match_stderr,flexible-extract": 0.013287342651674573,
+ "alias": " - gsm8k"
+ },
+ "hellaswag": {
+ "acc,none": 0.6691894045010954,
+ "acc_stderr,none": 0.00469543410395854,
+ "acc_norm,none": 0.8554072893845848,
+ "acc_norm_stderr,none": 0.0035097096477919466,
+ "alias": " - hellaswag"
+ },
+ "mmlu": {
+ "acc,none": 0.6364477994587665,
+ "acc_stderr,none": 0.0038271694585367516,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.5895855472901169,
+ "acc_stderr,none": 0.006760272274548804
+ },
+ "mmlu_formal_logic": {
+ "alias": " - formal_logic",
+ "acc,none": 0.4444444444444444,
+ "acc_stderr,none": 0.04444444444444449
+ },
+ "mmlu_high_school_european_history": {
+ "alias": " - high_school_european_history",
+ "acc,none": 0.7696969696969697,
+ "acc_stderr,none": 0.0328766675860349
+ },
+ "mmlu_high_school_us_history": {
+ "alias": " - high_school_us_history",
+ "acc,none": 0.8333333333333334,
+ "acc_stderr,none": 0.026156867523931055
+ },
+ "mmlu_high_school_world_history": {
+ "alias": " - high_school_world_history",
+ "acc,none": 0.8185654008438819,
+ "acc_stderr,none": 0.02508596114457965
+ },
+ "mmlu_international_law": {
+ "alias": " - international_law",
+ "acc,none": 0.7933884297520661,
+ "acc_stderr,none": 0.03695980128098823
+ },
+ "mmlu_jurisprudence": {
+ "alias": " - jurisprudence",
+ "acc,none": 0.7962962962962963,
+ "acc_stderr,none": 0.03893542518824847
+ },
+ "mmlu_logical_fallacies": {
+ "alias": " - logical_fallacies",
+ "acc,none": 0.7668711656441718,
+ "acc_stderr,none": 0.033220157957767414
+ },
+ "mmlu_moral_disputes": {
+ "alias": " - moral_disputes",
+ "acc,none": 0.7138728323699421,
+ "acc_stderr,none": 0.024332146779134128
+ },
+ "mmlu_moral_scenarios": {
+ "alias": " - moral_scenarios",
+ "acc,none": 0.39664804469273746,
+ "acc_stderr,none": 0.01636135476982247
+ },
+ "mmlu_philosophy": {
+ "alias": " - philosophy",
+ "acc,none": 0.6913183279742765,
+ "acc_stderr,none": 0.02623696588115326
+ },
+ "mmlu_prehistory": {
+ "alias": " - prehistory",
+ "acc,none": 0.7345679012345679,
+ "acc_stderr,none": 0.02456922360046085
+ },
+ "mmlu_professional_law": {
+ "alias": " - professional_law",
+ "acc,none": 0.46936114732724904,
+ "acc_stderr,none": 0.012746237711716634
+ },
+ "mmlu_world_religions": {
+ "alias": " - world_religions",
+ "acc,none": 0.847953216374269,
+ "acc_stderr,none": 0.02753912288906145
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.7055037013196009,
+ "acc_stderr,none": 0.007845586852292294
+ },
+ "mmlu_business_ethics": {
+ "alias": " - business_ethics",
+ "acc,none": 0.58,
+ "acc_stderr,none": 0.049604496374885836
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge",
+ "acc,none": 0.7018867924528301,
+ "acc_stderr,none": 0.028152837942493868
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine",
+ "acc,none": 0.6647398843930635,
+ "acc_stderr,none": 0.03599586301247077
+ },
+ "mmlu_global_facts": {
+ "alias": " - global_facts",
+ "acc,none": 0.38,
+ "acc_stderr,none": 0.04878317312145632
+ },
+ "mmlu_human_aging": {
+ "alias": " - human_aging",
+ "acc,none": 0.6771300448430493,
+ "acc_stderr,none": 0.031381476375755
+ },
+ "mmlu_management": {
+ "alias": " - management",
+ "acc,none": 0.7864077669902912,
+ "acc_stderr,none": 0.04058042015646034
+ },
+ "mmlu_marketing": {
+ "alias": " - marketing",
+ "acc,none": 0.8803418803418803,
+ "acc_stderr,none": 0.021262719400406964
+ },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics",
+ "acc,none": 0.71,
+ "acc_stderr,none": 0.045604802157206845
+ },
+ "mmlu_miscellaneous": {
+ "alias": " - miscellaneous",
+ "acc,none": 0.8326947637292464,
+ "acc_stderr,none": 0.01334732720292033
+ },
+ "mmlu_nutrition": {
+ "alias": " - nutrition",
+ "acc,none": 0.7352941176470589,
+ "acc_stderr,none": 0.025261691219729494
+ },
+ "mmlu_professional_accounting": {
+ "alias": " - professional_accounting",
+ "acc,none": 0.46808510638297873,
+ "acc_stderr,none": 0.02976667507587387
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine",
+ "acc,none": 0.6875,
+ "acc_stderr,none": 0.02815637344037142
+ },
+ "mmlu_virology": {
+ "alias": " - virology",
+ "acc,none": 0.5421686746987951,
+ "acc_stderr,none": 0.038786267710023595
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7432564185895353,
+ "acc_stderr,none": 0.007701333272557918
+ },
+ "mmlu_econometrics": {
+ "alias": " - econometrics",
+ "acc,none": 0.5087719298245614,
+ "acc_stderr,none": 0.04702880432049615
+ },
+ "mmlu_high_school_geography": {
+ "alias": " - high_school_geography",
+ "acc,none": 0.8181818181818182,
+ "acc_stderr,none": 0.027479603010538804
+ },
+ "mmlu_high_school_government_and_politics": {
+ "alias": " - high_school_government_and_politics",
+ "acc,none": 0.9015544041450777,
+ "acc_stderr,none": 0.021500249576033463
+ },
+ "mmlu_high_school_macroeconomics": {
+ "alias": " - high_school_macroeconomics",
+ "acc,none": 0.658974358974359,
+ "acc_stderr,none": 0.02403548967633508
+ },
+ "mmlu_high_school_microeconomics": {
+ "alias": " - high_school_microeconomics",
+ "acc,none": 0.6638655462184874,
+ "acc_stderr,none": 0.03068473711513537
+ },
+ "mmlu_high_school_psychology": {
+ "alias": " - high_school_psychology",
+ "acc,none": 0.8311926605504587,
+ "acc_stderr,none": 0.016060056268530368
+ },
+ "mmlu_human_sexuality": {
+ "alias": " - human_sexuality",
+ "acc,none": 0.7862595419847328,
+ "acc_stderr,none": 0.0359546161177469
+ },
+ "mmlu_professional_psychology": {
+ "alias": " - professional_psychology",
+ "acc,none": 0.6699346405228758,
+ "acc_stderr,none": 0.01902372616072455
+ },
+ "mmlu_public_relations": {
+ "alias": " - public_relations",
+ "acc,none": 0.6909090909090909,
+ "acc_stderr,none": 0.044262946482000985
+ },
+ "mmlu_security_studies": {
+ "alias": " - security_studies",
+ "acc,none": 0.7387755102040816,
+ "acc_stderr,none": 0.02812342933514278
+ },
+ "mmlu_sociology": {
+ "alias": " - sociology",
+ "acc,none": 0.8407960199004975,
+ "acc_stderr,none": 0.02587064676616913
+ },
+ "mmlu_us_foreign_policy": {
+ "alias": " - us_foreign_policy",
+ "acc,none": 0.86,
+ "acc_stderr,none": 0.03487350880197768
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5340945131620679,
+ "acc_stderr,none": 0.008514164103258936
+ },
+ "mmlu_abstract_algebra": {
+ "alias": " - abstract_algebra",
+ "acc,none": 0.38,
+ "acc_stderr,none": 0.048783173121456316
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy",
+ "acc,none": 0.6222222222222222,
+ "acc_stderr,none": 0.04188307537595853
+ },
+ "mmlu_astronomy": {
+ "alias": " - astronomy",
+ "acc,none": 0.6907894736842105,
+ "acc_stderr,none": 0.037610708698674805
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology",
+ "acc,none": 0.7569444444444444,
+ "acc_stderr,none": 0.03586879280080341
+ },
+ "mmlu_college_chemistry": {
+ "alias": " - college_chemistry",
+ "acc,none": 0.42,
+ "acc_stderr,none": 0.049604496374885836
+ },
+ "mmlu_college_computer_science": {
+ "alias": " - college_computer_science",
+ "acc,none": 0.54,
+ "acc_stderr,none": 0.05009082659620332
+ },
+ "mmlu_college_mathematics": {
+ "alias": " - college_mathematics",
+ "acc,none": 0.32,
+ "acc_stderr,none": 0.04688261722621505
+ },
+ "mmlu_college_physics": {
+ "alias": " - college_physics",
+ "acc,none": 0.4411764705882353,
+ "acc_stderr,none": 0.04940635630605659
+ },
+ "mmlu_computer_security": {
+ "alias": " - computer_security",
+ "acc,none": 0.79,
+ "acc_stderr,none": 0.040936018074033256
+ },
+ "mmlu_conceptual_physics": {
+ "alias": " - conceptual_physics",
+ "acc,none": 0.574468085106383,
+ "acc_stderr,none": 0.03232146916224468
+ },
+ "mmlu_electrical_engineering": {
+ "alias": " - electrical_engineering",
+ "acc,none": 0.5862068965517241,
+ "acc_stderr,none": 0.04104269211806232
+ },
+ "mmlu_elementary_mathematics": {
+ "alias": " - elementary_mathematics",
+ "acc,none": 0.3915343915343915,
+ "acc_stderr,none": 0.02513809138885111
+ },
+ "mmlu_high_school_biology": {
+ "alias": " - high_school_biology",
+ "acc,none": 0.7645161290322581,
+ "acc_stderr,none": 0.02413763242933771
+ },
+ "mmlu_high_school_chemistry": {
+ "alias": " - high_school_chemistry",
+ "acc,none": 0.5172413793103449,
+ "acc_stderr,none": 0.035158955511656986
+ },
+ "mmlu_high_school_computer_science": {
+ "alias": " - high_school_computer_science",
+ "acc,none": 0.71,
+ "acc_stderr,none": 0.045604802157206845
+ },
+ "mmlu_high_school_mathematics": {
+ "alias": " - high_school_mathematics",
+ "acc,none": 0.3592592592592593,
+ "acc_stderr,none": 0.029252905927251972
+ },
+ "mmlu_high_school_physics": {
+ "alias": " - high_school_physics",
+ "acc,none": 0.37748344370860926,
+ "acc_stderr,none": 0.0395802723112157
+ },
+ "mmlu_high_school_statistics": {
+ "alias": " - high_school_statistics",
+ "acc,none": 0.49074074074074076,
+ "acc_stderr,none": 0.034093869469927006
+ },
+ "mmlu_machine_learning": {
+ "alias": " - machine_learning",
+ "acc,none": 0.49107142857142855,
+ "acc_stderr,none": 0.04745033255489123
+ },
+ "truthfulqa": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.5182294709510535,
+ "acc_stderr,none": 0.011594047810301133,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "alias": " - truthfulqa"
+ },
+ "truthfulqa_gen": {
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.01744454444766118,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776224,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "alias": " - truthfulqa_gen"
+ },
+ "truthfulqa_mc1": {
+ "acc,none": 0.42962056303549573,
+ "acc_stderr,none": 0.017329234580409095,
+ "alias": " - truthfulqa_mc1"
+ },
+ "truthfulqa_mc2": {
+ "acc,none": 0.6068383788666114,
+ "acc_stderr,none": 0.01540731668290581,
+ "alias": " - truthfulqa_mc2"
+ },
+ "winogrande": {
+ "acc,none": 0.7742699289660616,
+ "acc_stderr,none": 0.011749626260902557,
+ "alias": " - winogrande"
+ },
+ "eq_bench": {
+ "eqbench,none": 71.54290317887124,
+ "eqbench_stderr,none": 2.0457017558365664,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "groups": {
+ "Open LLM Leaderboard": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "exact_match,strict-match": 0.6277482941622441,
+ "exact_match_stderr,strict-match": 0.013315375362565036,
+ "acc_norm,none": 0.8359193864811842,
+ "acc_norm_stderr,none": 0.00345585439013916,
+ "exact_match,flexible-extract": 0.6315390447308568,
+ "exact_match_stderr,flexible-extract": 0.013287342651674573,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.6514042969190568,
+ "acc_stderr,none": 0.0027599234131481932,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "alias": "Open LLM Leaderboard"
+ },
+ "mmlu": {
+ "acc,none": 0.6364477994587665,
+ "acc_stderr,none": 0.0038271694585367516,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.5895855472901169,
+ "acc_stderr,none": 0.006760272274548804
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.7055037013196009,
+ "acc_stderr,none": 0.007845586852292294
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7432564185895353,
+ "acc_stderr,none": 0.007701333272557918
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5340945131620679,
+ "acc_stderr,none": 0.008514164103258936
+ },
+ "truthfulqa": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.5182294709510535,
+ "acc_stderr,none": 0.011594047810301133,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "alias": " - truthfulqa"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "truthfulqa": [
+ "truthfulqa_gen",
+ "truthfulqa_mc1",
+ "truthfulqa_mc2"
+ ],
+ "mmlu_stem": [
+ "mmlu_high_school_chemistry",
+ "mmlu_college_physics",
+ "mmlu_college_mathematics",
+ "mmlu_astronomy",
+ "mmlu_high_school_physics",
+ "mmlu_computer_security",
+ "mmlu_elementary_mathematics",
+ "mmlu_electrical_engineering",
+ "mmlu_college_biology",
+ "mmlu_machine_learning",
+ "mmlu_high_school_biology",
+ "mmlu_high_school_mathematics",
+ "mmlu_anatomy",
+ "mmlu_high_school_statistics",
+ "mmlu_college_chemistry",
+ "mmlu_conceptual_physics",
+ "mmlu_high_school_computer_science",
+ "mmlu_college_computer_science",
+ "mmlu_abstract_algebra"
+ ],
+ "mmlu_other": [
+ "mmlu_professional_medicine",
+ "mmlu_professional_accounting",
+ "mmlu_management",
+ "mmlu_global_facts",
+ "mmlu_college_medicine",
+ "mmlu_business_ethics",
+ "mmlu_nutrition",
+ "mmlu_medical_genetics",
+ "mmlu_virology",
+ "mmlu_human_aging",
+ "mmlu_clinical_knowledge",
+ "mmlu_miscellaneous",
+ "mmlu_marketing"
+ ],
+ "mmlu_social_sciences": [
+ "mmlu_high_school_psychology",
+ "mmlu_sociology",
+ "mmlu_high_school_government_and_politics",
+ "mmlu_public_relations",
+ "mmlu_high_school_macroeconomics",
+ "mmlu_high_school_geography",
+ "mmlu_high_school_microeconomics",
+ "mmlu_security_studies",
+ "mmlu_us_foreign_policy",
+ "mmlu_professional_psychology",
+ "mmlu_human_sexuality",
+ "mmlu_econometrics"
+ ],
+ "mmlu_humanities": [
+ "mmlu_high_school_european_history",
+ "mmlu_formal_logic",
+ "mmlu_moral_scenarios",
+ "mmlu_moral_disputes",
+ "mmlu_world_religions",
+ "mmlu_high_school_world_history",
+ "mmlu_logical_fallacies",
+ "mmlu_international_law",
+ "mmlu_philosophy",
+ "mmlu_professional_law",
+ "mmlu_high_school_us_history",
+ "mmlu_prehistory",
+ "mmlu_jurisprudence"
+ ],
+ "mmlu": [
+ "mmlu_humanities",
+ "mmlu_social_sciences",
+ "mmlu_other",
+ "mmlu_stem"
+ ],
+ "Open LLM Leaderboard": [
+ "gsm8k",
+ "winogrande",
+ "mmlu",
+ "truthfulqa",
+ "hellaswag",
+ "arc_challenge"
+ ]
+ },
+ "configs": {
+ "arc_challenge": {
+ "task": "arc_challenge",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "allenai/ai2_arc",
+ "dataset_name": "ARC-Challenge",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "fewshot_split": "validation",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 25,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "gsm8k": {
+ "task": "gsm8k",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "gsm8k",
+ "dataset_name": "main",
+ "training_split": "train",
+ "test_split": "test",
+ "fewshot_split": "train",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{answer}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "exact_match",
+ "aggregation": "mean",
+ "higher_is_better": true,
+ "ignore_case": true,
+ "ignore_punctuation": false,
+ "regexes_to_ignore": [
+ ",",
+ "\\$",
+ "(?s).*#### ",
+ "\\.$"
+ ]
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "Question:",
+ "",
+ "<|im_end|>"
+ ],
+ "do_sample": false,
+ "temperature": 0.0
+ },
+ "repeats": 1,
+ "filter_list": [
+ {
+ "name": "strict-match",
+ "filter": [
+ {
+ "function": "regex",
+ "regex_pattern": "#### (\\-?[0-9\\.\\,]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ },
+ {
+ "name": "flexible-extract",
+ "filter": [
+ {
+ "function": "regex",
+ "group_select": -1,
+ "regex_pattern": "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ }
+ ],
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 10,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mmlu_abstract_algebra": {
+ "task": "mmlu_abstract_algebra",
+ "task_alias": "abstract_algebra",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "abstract_algebra",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_anatomy": {
+ "task": "mmlu_anatomy",
+ "task_alias": "anatomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "anatomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_astronomy": {
+ "task": "mmlu_astronomy",
+ "task_alias": "astronomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "astronomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_business_ethics": {
+ "task": "mmlu_business_ethics",
+ "task_alias": "business_ethics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "business_ethics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_clinical_knowledge": {
+ "task": "mmlu_clinical_knowledge",
+ "task_alias": "clinical_knowledge",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "clinical_knowledge",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_biology": {
+ "task": "mmlu_college_biology",
+ "task_alias": "college_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_chemistry": {
+ "task": "mmlu_college_chemistry",
+ "task_alias": "college_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_computer_science": {
+ "task": "mmlu_college_computer_science",
+ "task_alias": "college_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_mathematics": {
+ "task": "mmlu_college_mathematics",
+ "task_alias": "college_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_medicine": {
+ "task": "mmlu_college_medicine",
+ "task_alias": "college_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_physics": {
+ "task": "mmlu_college_physics",
+ "task_alias": "college_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_computer_security": {
+ "task": "mmlu_computer_security",
+ "task_alias": "computer_security",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "computer_security",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about computer security.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_conceptual_physics": {
+ "task": "mmlu_conceptual_physics",
+ "task_alias": "conceptual_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "conceptual_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_econometrics": {
+ "task": "mmlu_econometrics",
+ "task_alias": "econometrics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "econometrics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_electrical_engineering": {
+ "task": "mmlu_electrical_engineering",
+ "task_alias": "electrical_engineering",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "electrical_engineering",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_elementary_mathematics": {
+ "task": "mmlu_elementary_mathematics",
+ "task_alias": "elementary_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "elementary_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_formal_logic": {
+ "task": "mmlu_formal_logic",
+ "task_alias": "formal_logic",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "formal_logic",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_global_facts": {
+ "task": "mmlu_global_facts",
+ "task_alias": "global_facts",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "global_facts",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_biology": {
+ "task": "mmlu_high_school_biology",
+ "task_alias": "high_school_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_chemistry": {
+ "task": "mmlu_high_school_chemistry",
+ "task_alias": "high_school_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_computer_science": {
+ "task": "mmlu_high_school_computer_science",
+ "task_alias": "high_school_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_european_history": {
+ "task": "mmlu_high_school_european_history",
+ "task_alias": "high_school_european_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_european_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_geography": {
+ "task": "mmlu_high_school_geography",
+ "task_alias": "high_school_geography",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_geography",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_government_and_politics": {
+ "task": "mmlu_high_school_government_and_politics",
+ "task_alias": "high_school_government_and_politics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_government_and_politics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_macroeconomics": {
+ "task": "mmlu_high_school_macroeconomics",
+ "task_alias": "high_school_macroeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_macroeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_mathematics": {
+ "task": "mmlu_high_school_mathematics",
+ "task_alias": "high_school_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_microeconomics": {
+ "task": "mmlu_high_school_microeconomics",
+ "task_alias": "high_school_microeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_microeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_physics": {
+ "task": "mmlu_high_school_physics",
+ "task_alias": "high_school_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_psychology": {
+ "task": "mmlu_high_school_psychology",
+ "task_alias": "high_school_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_statistics": {
+ "task": "mmlu_high_school_statistics",
+ "task_alias": "high_school_statistics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_statistics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_us_history": {
+ "task": "mmlu_high_school_us_history",
+ "task_alias": "high_school_us_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_us_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_world_history": {
+ "task": "mmlu_high_school_world_history",
+ "task_alias": "high_school_world_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_world_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_aging": {
+ "task": "mmlu_human_aging",
+ "task_alias": "human_aging",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_aging",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_sexuality": {
+ "task": "mmlu_human_sexuality",
+ "task_alias": "human_sexuality",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_sexuality",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_international_law": {
+ "task": "mmlu_international_law",
+ "task_alias": "international_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "international_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_jurisprudence": {
+ "task": "mmlu_jurisprudence",
+ "task_alias": "jurisprudence",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "jurisprudence",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_logical_fallacies": {
+ "task": "mmlu_logical_fallacies",
+ "task_alias": "logical_fallacies",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "logical_fallacies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_machine_learning": {
+ "task": "mmlu_machine_learning",
+ "task_alias": "machine_learning",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "machine_learning",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_management": {
+ "task": "mmlu_management",
+ "task_alias": "management",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "management",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_marketing": {
+ "task": "mmlu_marketing",
+ "task_alias": "marketing",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "marketing",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_medical_genetics": {
+ "task": "mmlu_medical_genetics",
+ "task_alias": "medical_genetics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "medical_genetics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_miscellaneous": {
+ "task": "mmlu_miscellaneous",
+ "task_alias": "miscellaneous",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "miscellaneous",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_disputes": {
+ "task": "mmlu_moral_disputes",
+ "task_alias": "moral_disputes",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_disputes",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_scenarios": {
+ "task": "mmlu_moral_scenarios",
+ "task_alias": "moral_scenarios",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_scenarios",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_nutrition": {
+ "task": "mmlu_nutrition",
+ "task_alias": "nutrition",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "nutrition",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_philosophy": {
+ "task": "mmlu_philosophy",
+ "task_alias": "philosophy",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "philosophy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_prehistory": {
+ "task": "mmlu_prehistory",
+ "task_alias": "prehistory",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "prehistory",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_accounting": {
+ "task": "mmlu_professional_accounting",
+ "task_alias": "professional_accounting",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_accounting",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_law": {
+ "task": "mmlu_professional_law",
+ "task_alias": "professional_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_medicine": {
+ "task": "mmlu_professional_medicine",
+ "task_alias": "professional_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_psychology": {
+ "task": "mmlu_professional_psychology",
+ "task_alias": "professional_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_public_relations": {
+ "task": "mmlu_public_relations",
+ "task_alias": "public_relations",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "public_relations",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_security_studies": {
+ "task": "mmlu_security_studies",
+ "task_alias": "security_studies",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "security_studies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_sociology": {
+ "task": "mmlu_sociology",
+ "task_alias": "sociology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "sociology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_us_foreign_policy": {
+ "task": "mmlu_us_foreign_policy",
+ "task_alias": "us_foreign_policy",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "us_foreign_policy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_virology": {
+ "task": "mmlu_virology",
+ "task_alias": "virology",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "virology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_world_religions": {
+ "task": "mmlu_world_religions",
+ "task_alias": "world_religions",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "world_religions",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "truthfulqa_gen": {
+ "task": "truthfulqa_gen",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "generation",
+ "validation_split": "validation",
+ "process_docs": "def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:\n return dataset.map(preprocess_function)\n",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question}}",
+ "doc_to_target": " ",
+ "process_results": "def process_results_gen(doc, results):\n completion = results[0]\n true_refs, false_refs = doc[\"correct_answers\"], doc[\"incorrect_answers\"]\n all_refs = true_refs + false_refs\n\n # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures.\n\n # # BLEURT\n # bleurt_scores_true = self.bleurt.compute(\n # predictions=[completion] * len(true_refs), references=true_refs\n # )[\"scores\"]\n # bleurt_scores_false = self.bleurt.compute(\n # predictions=[completion] * len(false_refs), references=false_refs\n # )[\"scores\"]\n # bleurt_correct = max(bleurt_scores_true)\n # bleurt_incorrect = max(bleurt_scores_false)\n # bleurt_max = bleurt_correct\n # bleurt_diff = bleurt_correct - bleurt_incorrect\n # bleurt_acc = int(bleurt_correct > bleurt_incorrect)\n\n # BLEU\n bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]\n bleu_correct = np.nanmax(bleu_scores[: len(true_refs)])\n bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :])\n bleu_max = bleu_correct\n bleu_diff = bleu_correct - bleu_incorrect\n bleu_acc = int(bleu_correct > bleu_incorrect)\n\n # ROUGE-N\n rouge_scores = [rouge([ref], [completion]) for ref in all_refs]\n # ROUGE-1\n rouge1_scores = [score[\"rouge1\"] for score in rouge_scores]\n rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)])\n rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :])\n rouge1_max = rouge1_correct\n rouge1_diff = rouge1_correct - rouge1_incorrect\n rouge1_acc = int(rouge1_correct > rouge1_incorrect)\n # ROUGE-2\n rouge2_scores = [score[\"rouge2\"] for score in rouge_scores]\n rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)])\n rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :])\n rouge2_max = rouge2_correct\n rouge2_diff = rouge2_correct - rouge2_incorrect\n rouge2_acc = int(rouge2_correct > rouge2_incorrect)\n # ROUGE-L\n rougeL_scores = [score[\"rougeLsum\"] for score in rouge_scores]\n rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)])\n rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :])\n rougeL_max = rougeL_correct\n rougeL_diff = rougeL_correct - rougeL_incorrect\n rougeL_acc = int(rougeL_correct > rougeL_incorrect)\n\n return {\n # \"bleurt_max\": bleurt_max,\n # \"bleurt_acc\": bleurt_acc,\n # \"bleurt_diff\": bleurt_diff,\n \"bleu_max\": bleu_max,\n \"bleu_acc\": bleu_acc,\n \"bleu_diff\": bleu_diff,\n \"rouge1_max\": rouge1_max,\n \"rouge1_acc\": rouge1_acc,\n \"rouge1_diff\": rouge1_diff,\n \"rouge2_max\": rouge2_max,\n \"rouge2_acc\": rouge2_acc,\n \"rouge2_diff\": rouge2_diff,\n \"rougeL_max\": rougeL_max,\n \"rougeL_acc\": rougeL_acc,\n \"rougeL_diff\": rougeL_diff,\n }\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "bleu_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "\n\n"
+ ],
+ "do_sample": false
+ },
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "truthfulqa_mc1": {
+ "task": "truthfulqa_mc1",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc1_targets.choices}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "truthfulqa_mc2": {
+ "task": "truthfulqa_mc2",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc2_targets.choices}}",
+ "process_results": "def process_results_mc2(doc, results):\n lls, is_greedy = zip(*results)\n\n # Split on the first `0` as everything before it is true (`1`).\n split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n # Compute the normalized probability mass for the correct answer.\n ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n p_true = p_true / (sum(p_true) + sum(p_false))\n\n return {\"acc\": sum(p_true)}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "winogrande": {
+ "task": "winogrande",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "winogrande",
+ "dataset_name": "winogrande_xl",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n",
+ "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n",
+ "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "sentence",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "arc_challenge": 1.0,
+ "eq_bench": 2.1,
+ "gsm8k": 3.0,
+ "hellaswag": 1.0,
+ "mmlu_abstract_algebra": 0.0,
+ "mmlu_anatomy": 0.0,
+ "mmlu_astronomy": 0.0,
+ "mmlu_business_ethics": 0.0,
+ "mmlu_clinical_knowledge": 0.0,
+ "mmlu_college_biology": 0.0,
+ "mmlu_college_chemistry": 0.0,
+ "mmlu_college_computer_science": 0.0,
+ "mmlu_college_mathematics": 0.0,
+ "mmlu_college_medicine": 0.0,
+ "mmlu_college_physics": 0.0,
+ "mmlu_computer_security": 0.0,
+ "mmlu_conceptual_physics": 0.0,
+ "mmlu_econometrics": 0.0,
+ "mmlu_electrical_engineering": 0.0,
+ "mmlu_elementary_mathematics": 0.0,
+ "mmlu_formal_logic": 0.0,
+ "mmlu_global_facts": 0.0,
+ "mmlu_high_school_biology": 0.0,
+ "mmlu_high_school_chemistry": 0.0,
+ "mmlu_high_school_computer_science": 0.0,
+ "mmlu_high_school_european_history": 0.0,
+ "mmlu_high_school_geography": 0.0,
+ "mmlu_high_school_government_and_politics": 0.0,
+ "mmlu_high_school_macroeconomics": 0.0,
+ "mmlu_high_school_mathematics": 0.0,
+ "mmlu_high_school_microeconomics": 0.0,
+ "mmlu_high_school_physics": 0.0,
+ "mmlu_high_school_psychology": 0.0,
+ "mmlu_high_school_statistics": 0.0,
+ "mmlu_high_school_us_history": 0.0,
+ "mmlu_high_school_world_history": 0.0,
+ "mmlu_human_aging": 0.0,
+ "mmlu_human_sexuality": 0.0,
+ "mmlu_international_law": 0.0,
+ "mmlu_jurisprudence": 0.0,
+ "mmlu_logical_fallacies": 0.0,
+ "mmlu_machine_learning": 0.0,
+ "mmlu_management": 0.0,
+ "mmlu_marketing": 0.0,
+ "mmlu_medical_genetics": 0.0,
+ "mmlu_miscellaneous": 0.0,
+ "mmlu_moral_disputes": 0.0,
+ "mmlu_moral_scenarios": 0.0,
+ "mmlu_nutrition": 0.0,
+ "mmlu_philosophy": 0.0,
+ "mmlu_prehistory": 0.0,
+ "mmlu_professional_accounting": 0.0,
+ "mmlu_professional_law": 0.0,
+ "mmlu_professional_medicine": 0.0,
+ "mmlu_professional_psychology": 0.0,
+ "mmlu_public_relations": 0.0,
+ "mmlu_security_studies": 0.0,
+ "mmlu_sociology": 0.0,
+ "mmlu_us_foreign_policy": 0.0,
+ "mmlu_virology": 0.0,
+ "mmlu_world_religions": 0.0,
+ "truthfulqa_gen": 3.0,
+ "truthfulqa_mc1": 2.0,
+ "truthfulqa_mc2": 2.0,
+ "winogrande": 1.0
+ },
+ "n-shot": {
+ "Open LLM Leaderboard": 5,
+ "arc_challenge": 25,
+ "eq_bench": 0,
+ "gsm8k": 5,
+ "hellaswag": 10,
+ "mmlu": 0,
+ "mmlu_abstract_algebra": 5,
+ "mmlu_anatomy": 5,
+ "mmlu_astronomy": 5,
+ "mmlu_business_ethics": 5,
+ "mmlu_clinical_knowledge": 5,
+ "mmlu_college_biology": 5,
+ "mmlu_college_chemistry": 5,
+ "mmlu_college_computer_science": 5,
+ "mmlu_college_mathematics": 5,
+ "mmlu_college_medicine": 5,
+ "mmlu_college_physics": 5,
+ "mmlu_computer_security": 5,
+ "mmlu_conceptual_physics": 5,
+ "mmlu_econometrics": 5,
+ "mmlu_electrical_engineering": 5,
+ "mmlu_elementary_mathematics": 5,
+ "mmlu_formal_logic": 5,
+ "mmlu_global_facts": 5,
+ "mmlu_high_school_biology": 5,
+ "mmlu_high_school_chemistry": 5,
+ "mmlu_high_school_computer_science": 5,
+ "mmlu_high_school_european_history": 5,
+ "mmlu_high_school_geography": 5,
+ "mmlu_high_school_government_and_politics": 5,
+ "mmlu_high_school_macroeconomics": 5,
+ "mmlu_high_school_mathematics": 5,
+ "mmlu_high_school_microeconomics": 5,
+ "mmlu_high_school_physics": 5,
+ "mmlu_high_school_psychology": 5,
+ "mmlu_high_school_statistics": 5,
+ "mmlu_high_school_us_history": 5,
+ "mmlu_high_school_world_history": 5,
+ "mmlu_human_aging": 5,
+ "mmlu_human_sexuality": 5,
+ "mmlu_humanities": 5,
+ "mmlu_international_law": 5,
+ "mmlu_jurisprudence": 5,
+ "mmlu_logical_fallacies": 5,
+ "mmlu_machine_learning": 5,
+ "mmlu_management": 5,
+ "mmlu_marketing": 5,
+ "mmlu_medical_genetics": 5,
+ "mmlu_miscellaneous": 5,
+ "mmlu_moral_disputes": 5,
+ "mmlu_moral_scenarios": 5,
+ "mmlu_nutrition": 5,
+ "mmlu_other": 5,
+ "mmlu_philosophy": 5,
+ "mmlu_prehistory": 5,
+ "mmlu_professional_accounting": 5,
+ "mmlu_professional_law": 5,
+ "mmlu_professional_medicine": 5,
+ "mmlu_professional_psychology": 5,
+ "mmlu_public_relations": 5,
+ "mmlu_security_studies": 5,
+ "mmlu_social_sciences": 5,
+ "mmlu_sociology": 5,
+ "mmlu_stem": 5,
+ "mmlu_us_foreign_policy": 5,
+ "mmlu_virology": 5,
+ "mmlu_world_religions": 5,
+ "truthfulqa": 0,
+ "truthfulqa_gen": 0,
+ "truthfulqa_mc1": 0,
+ "truthfulqa_mc2": 0,
+ "winogrande": 5
+ },
+ "higher_is_better": {
+ "Open LLM Leaderboard": {
+ "exact_match": true,
+ "acc": true,
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc_norm": true
+ },
+ "arc_challenge": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "gsm8k": {
+ "exact_match": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "mmlu": {
+ "acc": true
+ },
+ "mmlu_abstract_algebra": {
+ "acc": true
+ },
+ "mmlu_anatomy": {
+ "acc": true
+ },
+ "mmlu_astronomy": {
+ "acc": true
+ },
+ "mmlu_business_ethics": {
+ "acc": true
+ },
+ "mmlu_clinical_knowledge": {
+ "acc": true
+ },
+ "mmlu_college_biology": {
+ "acc": true
+ },
+ "mmlu_college_chemistry": {
+ "acc": true
+ },
+ "mmlu_college_computer_science": {
+ "acc": true
+ },
+ "mmlu_college_mathematics": {
+ "acc": true
+ },
+ "mmlu_college_medicine": {
+ "acc": true
+ },
+ "mmlu_college_physics": {
+ "acc": true
+ },
+ "mmlu_computer_security": {
+ "acc": true
+ },
+ "mmlu_conceptual_physics": {
+ "acc": true
+ },
+ "mmlu_econometrics": {
+ "acc": true
+ },
+ "mmlu_electrical_engineering": {
+ "acc": true
+ },
+ "mmlu_elementary_mathematics": {
+ "acc": true
+ },
+ "mmlu_formal_logic": {
+ "acc": true
+ },
+ "mmlu_global_facts": {
+ "acc": true
+ },
+ "mmlu_high_school_biology": {
+ "acc": true
+ },
+ "mmlu_high_school_chemistry": {
+ "acc": true
+ },
+ "mmlu_high_school_computer_science": {
+ "acc": true
+ },
+ "mmlu_high_school_european_history": {
+ "acc": true
+ },
+ "mmlu_high_school_geography": {
+ "acc": true
+ },
+ "mmlu_high_school_government_and_politics": {
+ "acc": true
+ },
+ "mmlu_high_school_macroeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_mathematics": {
+ "acc": true
+ },
+ "mmlu_high_school_microeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_physics": {
+ "acc": true
+ },
+ "mmlu_high_school_psychology": {
+ "acc": true
+ },
+ "mmlu_high_school_statistics": {
+ "acc": true
+ },
+ "mmlu_high_school_us_history": {
+ "acc": true
+ },
+ "mmlu_high_school_world_history": {
+ "acc": true
+ },
+ "mmlu_human_aging": {
+ "acc": true
+ },
+ "mmlu_human_sexuality": {
+ "acc": true
+ },
+ "mmlu_humanities": {
+ "acc": true
+ },
+ "mmlu_international_law": {
+ "acc": true
+ },
+ "mmlu_jurisprudence": {
+ "acc": true
+ },
+ "mmlu_logical_fallacies": {
+ "acc": true
+ },
+ "mmlu_machine_learning": {
+ "acc": true
+ },
+ "mmlu_management": {
+ "acc": true
+ },
+ "mmlu_marketing": {
+ "acc": true
+ },
+ "mmlu_medical_genetics": {
+ "acc": true
+ },
+ "mmlu_miscellaneous": {
+ "acc": true
+ },
+ "mmlu_moral_disputes": {
+ "acc": true
+ },
+ "mmlu_moral_scenarios": {
+ "acc": true
+ },
+ "mmlu_nutrition": {
+ "acc": true
+ },
+ "mmlu_other": {
+ "acc": true
+ },
+ "mmlu_philosophy": {
+ "acc": true
+ },
+ "mmlu_prehistory": {
+ "acc": true
+ },
+ "mmlu_professional_accounting": {
+ "acc": true
+ },
+ "mmlu_professional_law": {
+ "acc": true
+ },
+ "mmlu_professional_medicine": {
+ "acc": true
+ },
+ "mmlu_professional_psychology": {
+ "acc": true
+ },
+ "mmlu_public_relations": {
+ "acc": true
+ },
+ "mmlu_security_studies": {
+ "acc": true
+ },
+ "mmlu_social_sciences": {
+ "acc": true
+ },
+ "mmlu_sociology": {
+ "acc": true
+ },
+ "mmlu_stem": {
+ "acc": true
+ },
+ "mmlu_us_foreign_policy": {
+ "acc": true
+ },
+ "mmlu_virology": {
+ "acc": true
+ },
+ "mmlu_world_religions": {
+ "acc": true
+ },
+ "truthfulqa": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc": true
+ },
+ "truthfulqa_gen": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true
+ },
+ "truthfulqa_mc1": {
+ "acc": true
+ },
+ "truthfulqa_mc2": {
+ "acc": true
+ },
+ "winogrande": {
+ "acc": true
+ }
+ },
+ "n-samples": {
+ "gsm8k": {
+ "original": 1319,
+ "effective": 1319
+ },
+ "winogrande": {
+ "original": 1267,
+ "effective": 1267
+ },
+ "mmlu_high_school_european_history": {
+ "original": 165,
+ "effective": 165
+ },
+ "mmlu_formal_logic": {
+ "original": 126,
+ "effective": 126
+ },
+ "mmlu_moral_scenarios": {
+ "original": 895,
+ "effective": 895
+ },
+ "mmlu_moral_disputes": {
+ "original": 346,
+ "effective": 346
+ },
+ "mmlu_world_religions": {
+ "original": 171,
+ "effective": 171
+ },
+ "mmlu_high_school_world_history": {
+ "original": 237,
+ "effective": 237
+ },
+ "mmlu_logical_fallacies": {
+ "original": 163,
+ "effective": 163
+ },
+ "mmlu_international_law": {
+ "original": 121,
+ "effective": 121
+ },
+ "mmlu_philosophy": {
+ "original": 311,
+ "effective": 311
+ },
+ "mmlu_professional_law": {
+ "original": 1534,
+ "effective": 1534
+ },
+ "mmlu_high_school_us_history": {
+ "original": 204,
+ "effective": 204
+ },
+ "mmlu_prehistory": {
+ "original": 324,
+ "effective": 324
+ },
+ "mmlu_jurisprudence": {
+ "original": 108,
+ "effective": 108
+ },
+ "mmlu_high_school_psychology": {
+ "original": 545,
+ "effective": 545
+ },
+ "mmlu_sociology": {
+ "original": 201,
+ "effective": 201
+ },
+ "mmlu_high_school_government_and_politics": {
+ "original": 193,
+ "effective": 193
+ },
+ "mmlu_public_relations": {
+ "original": 110,
+ "effective": 110
+ },
+ "mmlu_high_school_macroeconomics": {
+ "original": 390,
+ "effective": 390
+ },
+ "mmlu_high_school_geography": {
+ "original": 198,
+ "effective": 198
+ },
+ "mmlu_high_school_microeconomics": {
+ "original": 238,
+ "effective": 238
+ },
+ "mmlu_security_studies": {
+ "original": 245,
+ "effective": 245
+ },
+ "mmlu_us_foreign_policy": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_professional_psychology": {
+ "original": 612,
+ "effective": 612
+ },
+ "mmlu_human_sexuality": {
+ "original": 131,
+ "effective": 131
+ },
+ "mmlu_econometrics": {
+ "original": 114,
+ "effective": 114
+ },
+ "mmlu_professional_medicine": {
+ "original": 272,
+ "effective": 272
+ },
+ "mmlu_professional_accounting": {
+ "original": 282,
+ "effective": 282
+ },
+ "mmlu_management": {
+ "original": 103,
+ "effective": 103
+ },
+ "mmlu_global_facts": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_medicine": {
+ "original": 173,
+ "effective": 173
+ },
+ "mmlu_business_ethics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_nutrition": {
+ "original": 306,
+ "effective": 306
+ },
+ "mmlu_medical_genetics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_virology": {
+ "original": 166,
+ "effective": 166
+ },
+ "mmlu_human_aging": {
+ "original": 223,
+ "effective": 223
+ },
+ "mmlu_clinical_knowledge": {
+ "original": 265,
+ "effective": 265
+ },
+ "mmlu_miscellaneous": {
+ "original": 783,
+ "effective": 783
+ },
+ "mmlu_marketing": {
+ "original": 234,
+ "effective": 234
+ },
+ "mmlu_high_school_chemistry": {
+ "original": 203,
+ "effective": 203
+ },
+ "mmlu_college_physics": {
+ "original": 102,
+ "effective": 102
+ },
+ "mmlu_college_mathematics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_astronomy": {
+ "original": 152,
+ "effective": 152
+ },
+ "mmlu_high_school_physics": {
+ "original": 151,
+ "effective": 151
+ },
+ "mmlu_computer_security": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_elementary_mathematics": {
+ "original": 378,
+ "effective": 378
+ },
+ "mmlu_electrical_engineering": {
+ "original": 145,
+ "effective": 145
+ },
+ "mmlu_college_biology": {
+ "original": 144,
+ "effective": 144
+ },
+ "mmlu_machine_learning": {
+ "original": 112,
+ "effective": 112
+ },
+ "mmlu_high_school_biology": {
+ "original": 310,
+ "effective": 310
+ },
+ "mmlu_high_school_mathematics": {
+ "original": 270,
+ "effective": 270
+ },
+ "mmlu_anatomy": {
+ "original": 135,
+ "effective": 135
+ },
+ "mmlu_high_school_statistics": {
+ "original": 216,
+ "effective": 216
+ },
+ "mmlu_college_chemistry": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_conceptual_physics": {
+ "original": 235,
+ "effective": 235
+ },
+ "mmlu_high_school_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_abstract_algebra": {
+ "original": 100,
+ "effective": 100
+ },
+ "truthfulqa_gen": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc1": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc2": {
+ "original": 817,
+ "effective": 817
+ },
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "arc_challenge": {
+ "original": 1172,
+ "effective": 1172
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Iced-Lemon-Cookie-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "e0656657a5d5cc73bc16d9852f5894f31ed7fcb5",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 2
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719586774.8240964,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Iced-Lemon-Cookie-7B",
+ "model_name_sanitized": "FallenMerick__Iced-Lemon-Cookie-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 139158.078961917,
+ "end_time": 175571.331573164,
+ "total_evaluation_time_seconds": "36413.25261124701"
+}
\ No newline at end of file
diff --git a/FallenMerick__Iced-Lemon-Cookie-7B/results_2024-06-29T01-06-21.272851.json b/FallenMerick__Iced-Lemon-Cookie-7B/results_2024-06-29T01-06-21.272851.json
new file mode 100644
index 0000000000000000000000000000000000000000..f70e6cb7d4fbce6e8712b995b7155042a52477e1
--- /dev/null
+++ b/FallenMerick__Iced-Lemon-Cookie-7B/results_2024-06-29T01-06-21.272851.json
@@ -0,0 +1,4035 @@
+{
+ "results": {
+ "Open LLM Leaderboard": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "exact_match,strict-match": 0.6277482941622441,
+ "exact_match_stderr,strict-match": 0.013315375362565036,
+ "acc_norm,none": 0.8359193864811842,
+ "acc_norm_stderr,none": 0.00345585439013916,
+ "exact_match,flexible-extract": 0.6315390447308568,
+ "exact_match_stderr,flexible-extract": 0.013287342651674573,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.6514042969190568,
+ "acc_stderr,none": 0.0027599234131481932,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "alias": "Open LLM Leaderboard"
+ },
+ "arc_challenge": {
+ "acc,none": 0.6382252559726962,
+ "acc_stderr,none": 0.014041957945038085,
+ "acc_norm,none": 0.6689419795221843,
+ "acc_norm_stderr,none": 0.013752062419817841,
+ "alias": " - arc_challenge"
+ },
+ "gsm8k": {
+ "exact_match,strict-match": 0.6277482941622441,
+ "exact_match_stderr,strict-match": 0.013315375362565036,
+ "exact_match,flexible-extract": 0.6315390447308568,
+ "exact_match_stderr,flexible-extract": 0.013287342651674573,
+ "alias": " - gsm8k"
+ },
+ "hellaswag": {
+ "acc,none": 0.6691894045010954,
+ "acc_stderr,none": 0.00469543410395854,
+ "acc_norm,none": 0.8554072893845848,
+ "acc_norm_stderr,none": 0.0035097096477919466,
+ "alias": " - hellaswag"
+ },
+ "mmlu": {
+ "acc,none": 0.6364477994587665,
+ "acc_stderr,none": 0.0038271694585367516,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.5895855472901169,
+ "acc_stderr,none": 0.006760272274548804
+ },
+ "mmlu_formal_logic": {
+ "alias": " - formal_logic",
+ "acc,none": 0.4444444444444444,
+ "acc_stderr,none": 0.04444444444444449
+ },
+ "mmlu_high_school_european_history": {
+ "alias": " - high_school_european_history",
+ "acc,none": 0.7696969696969697,
+ "acc_stderr,none": 0.0328766675860349
+ },
+ "mmlu_high_school_us_history": {
+ "alias": " - high_school_us_history",
+ "acc,none": 0.8333333333333334,
+ "acc_stderr,none": 0.026156867523931055
+ },
+ "mmlu_high_school_world_history": {
+ "alias": " - high_school_world_history",
+ "acc,none": 0.8185654008438819,
+ "acc_stderr,none": 0.02508596114457965
+ },
+ "mmlu_international_law": {
+ "alias": " - international_law",
+ "acc,none": 0.7933884297520661,
+ "acc_stderr,none": 0.03695980128098823
+ },
+ "mmlu_jurisprudence": {
+ "alias": " - jurisprudence",
+ "acc,none": 0.7962962962962963,
+ "acc_stderr,none": 0.03893542518824847
+ },
+ "mmlu_logical_fallacies": {
+ "alias": " - logical_fallacies",
+ "acc,none": 0.7668711656441718,
+ "acc_stderr,none": 0.033220157957767414
+ },
+ "mmlu_moral_disputes": {
+ "alias": " - moral_disputes",
+ "acc,none": 0.7138728323699421,
+ "acc_stderr,none": 0.024332146779134128
+ },
+ "mmlu_moral_scenarios": {
+ "alias": " - moral_scenarios",
+ "acc,none": 0.39664804469273746,
+ "acc_stderr,none": 0.01636135476982247
+ },
+ "mmlu_philosophy": {
+ "alias": " - philosophy",
+ "acc,none": 0.6913183279742765,
+ "acc_stderr,none": 0.02623696588115326
+ },
+ "mmlu_prehistory": {
+ "alias": " - prehistory",
+ "acc,none": 0.7345679012345679,
+ "acc_stderr,none": 0.02456922360046085
+ },
+ "mmlu_professional_law": {
+ "alias": " - professional_law",
+ "acc,none": 0.46936114732724904,
+ "acc_stderr,none": 0.012746237711716634
+ },
+ "mmlu_world_religions": {
+ "alias": " - world_religions",
+ "acc,none": 0.847953216374269,
+ "acc_stderr,none": 0.02753912288906145
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.7055037013196009,
+ "acc_stderr,none": 0.007845586852292294
+ },
+ "mmlu_business_ethics": {
+ "alias": " - business_ethics",
+ "acc,none": 0.58,
+ "acc_stderr,none": 0.049604496374885836
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge",
+ "acc,none": 0.7018867924528301,
+ "acc_stderr,none": 0.028152837942493868
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine",
+ "acc,none": 0.6647398843930635,
+ "acc_stderr,none": 0.03599586301247077
+ },
+ "mmlu_global_facts": {
+ "alias": " - global_facts",
+ "acc,none": 0.38,
+ "acc_stderr,none": 0.04878317312145632
+ },
+ "mmlu_human_aging": {
+ "alias": " - human_aging",
+ "acc,none": 0.6771300448430493,
+ "acc_stderr,none": 0.031381476375755
+ },
+ "mmlu_management": {
+ "alias": " - management",
+ "acc,none": 0.7864077669902912,
+ "acc_stderr,none": 0.04058042015646034
+ },
+ "mmlu_marketing": {
+ "alias": " - marketing",
+ "acc,none": 0.8803418803418803,
+ "acc_stderr,none": 0.021262719400406964
+ },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics",
+ "acc,none": 0.71,
+ "acc_stderr,none": 0.045604802157206845
+ },
+ "mmlu_miscellaneous": {
+ "alias": " - miscellaneous",
+ "acc,none": 0.8326947637292464,
+ "acc_stderr,none": 0.01334732720292033
+ },
+ "mmlu_nutrition": {
+ "alias": " - nutrition",
+ "acc,none": 0.7352941176470589,
+ "acc_stderr,none": 0.025261691219729494
+ },
+ "mmlu_professional_accounting": {
+ "alias": " - professional_accounting",
+ "acc,none": 0.46808510638297873,
+ "acc_stderr,none": 0.02976667507587387
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine",
+ "acc,none": 0.6875,
+ "acc_stderr,none": 0.02815637344037142
+ },
+ "mmlu_virology": {
+ "alias": " - virology",
+ "acc,none": 0.5421686746987951,
+ "acc_stderr,none": 0.038786267710023595
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7432564185895353,
+ "acc_stderr,none": 0.007701333272557918
+ },
+ "mmlu_econometrics": {
+ "alias": " - econometrics",
+ "acc,none": 0.5087719298245614,
+ "acc_stderr,none": 0.04702880432049615
+ },
+ "mmlu_high_school_geography": {
+ "alias": " - high_school_geography",
+ "acc,none": 0.8181818181818182,
+ "acc_stderr,none": 0.027479603010538804
+ },
+ "mmlu_high_school_government_and_politics": {
+ "alias": " - high_school_government_and_politics",
+ "acc,none": 0.9015544041450777,
+ "acc_stderr,none": 0.021500249576033463
+ },
+ "mmlu_high_school_macroeconomics": {
+ "alias": " - high_school_macroeconomics",
+ "acc,none": 0.658974358974359,
+ "acc_stderr,none": 0.02403548967633508
+ },
+ "mmlu_high_school_microeconomics": {
+ "alias": " - high_school_microeconomics",
+ "acc,none": 0.6638655462184874,
+ "acc_stderr,none": 0.03068473711513537
+ },
+ "mmlu_high_school_psychology": {
+ "alias": " - high_school_psychology",
+ "acc,none": 0.8311926605504587,
+ "acc_stderr,none": 0.016060056268530368
+ },
+ "mmlu_human_sexuality": {
+ "alias": " - human_sexuality",
+ "acc,none": 0.7862595419847328,
+ "acc_stderr,none": 0.0359546161177469
+ },
+ "mmlu_professional_psychology": {
+ "alias": " - professional_psychology",
+ "acc,none": 0.6699346405228758,
+ "acc_stderr,none": 0.01902372616072455
+ },
+ "mmlu_public_relations": {
+ "alias": " - public_relations",
+ "acc,none": 0.6909090909090909,
+ "acc_stderr,none": 0.044262946482000985
+ },
+ "mmlu_security_studies": {
+ "alias": " - security_studies",
+ "acc,none": 0.7387755102040816,
+ "acc_stderr,none": 0.02812342933514278
+ },
+ "mmlu_sociology": {
+ "alias": " - sociology",
+ "acc,none": 0.8407960199004975,
+ "acc_stderr,none": 0.02587064676616913
+ },
+ "mmlu_us_foreign_policy": {
+ "alias": " - us_foreign_policy",
+ "acc,none": 0.86,
+ "acc_stderr,none": 0.03487350880197768
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5340945131620679,
+ "acc_stderr,none": 0.008514164103258936
+ },
+ "mmlu_abstract_algebra": {
+ "alias": " - abstract_algebra",
+ "acc,none": 0.38,
+ "acc_stderr,none": 0.048783173121456316
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy",
+ "acc,none": 0.6222222222222222,
+ "acc_stderr,none": 0.04188307537595853
+ },
+ "mmlu_astronomy": {
+ "alias": " - astronomy",
+ "acc,none": 0.6907894736842105,
+ "acc_stderr,none": 0.037610708698674805
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology",
+ "acc,none": 0.7569444444444444,
+ "acc_stderr,none": 0.03586879280080341
+ },
+ "mmlu_college_chemistry": {
+ "alias": " - college_chemistry",
+ "acc,none": 0.42,
+ "acc_stderr,none": 0.049604496374885836
+ },
+ "mmlu_college_computer_science": {
+ "alias": " - college_computer_science",
+ "acc,none": 0.54,
+ "acc_stderr,none": 0.05009082659620332
+ },
+ "mmlu_college_mathematics": {
+ "alias": " - college_mathematics",
+ "acc,none": 0.32,
+ "acc_stderr,none": 0.04688261722621505
+ },
+ "mmlu_college_physics": {
+ "alias": " - college_physics",
+ "acc,none": 0.4411764705882353,
+ "acc_stderr,none": 0.04940635630605659
+ },
+ "mmlu_computer_security": {
+ "alias": " - computer_security",
+ "acc,none": 0.79,
+ "acc_stderr,none": 0.040936018074033256
+ },
+ "mmlu_conceptual_physics": {
+ "alias": " - conceptual_physics",
+ "acc,none": 0.574468085106383,
+ "acc_stderr,none": 0.03232146916224468
+ },
+ "mmlu_electrical_engineering": {
+ "alias": " - electrical_engineering",
+ "acc,none": 0.5862068965517241,
+ "acc_stderr,none": 0.04104269211806232
+ },
+ "mmlu_elementary_mathematics": {
+ "alias": " - elementary_mathematics",
+ "acc,none": 0.3915343915343915,
+ "acc_stderr,none": 0.02513809138885111
+ },
+ "mmlu_high_school_biology": {
+ "alias": " - high_school_biology",
+ "acc,none": 0.7645161290322581,
+ "acc_stderr,none": 0.02413763242933771
+ },
+ "mmlu_high_school_chemistry": {
+ "alias": " - high_school_chemistry",
+ "acc,none": 0.5172413793103449,
+ "acc_stderr,none": 0.035158955511656986
+ },
+ "mmlu_high_school_computer_science": {
+ "alias": " - high_school_computer_science",
+ "acc,none": 0.71,
+ "acc_stderr,none": 0.045604802157206845
+ },
+ "mmlu_high_school_mathematics": {
+ "alias": " - high_school_mathematics",
+ "acc,none": 0.3592592592592593,
+ "acc_stderr,none": 0.029252905927251972
+ },
+ "mmlu_high_school_physics": {
+ "alias": " - high_school_physics",
+ "acc,none": 0.37748344370860926,
+ "acc_stderr,none": 0.0395802723112157
+ },
+ "mmlu_high_school_statistics": {
+ "alias": " - high_school_statistics",
+ "acc,none": 0.49074074074074076,
+ "acc_stderr,none": 0.034093869469927006
+ },
+ "mmlu_machine_learning": {
+ "alias": " - machine_learning",
+ "acc,none": 0.49107142857142855,
+ "acc_stderr,none": 0.04745033255489123
+ },
+ "truthfulqa": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.5182294709510535,
+ "acc_stderr,none": 0.011594047810301133,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "alias": " - truthfulqa"
+ },
+ "truthfulqa_gen": {
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.01744454444766118,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776224,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "alias": " - truthfulqa_gen"
+ },
+ "truthfulqa_mc1": {
+ "acc,none": 0.42962056303549573,
+ "acc_stderr,none": 0.017329234580409095,
+ "alias": " - truthfulqa_mc1"
+ },
+ "truthfulqa_mc2": {
+ "acc,none": 0.6068383788666114,
+ "acc_stderr,none": 0.01540731668290581,
+ "alias": " - truthfulqa_mc2"
+ },
+ "winogrande": {
+ "acc,none": 0.7742699289660616,
+ "acc_stderr,none": 0.011749626260902557,
+ "alias": " - winogrande"
+ },
+ "eq_bench": {
+ "eqbench,none": 71.54290317887124,
+ "eqbench_stderr,none": 2.0457017558365664,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "groups": {
+ "Open LLM Leaderboard": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "exact_match,strict-match": 0.6277482941622441,
+ "exact_match_stderr,strict-match": 0.013315375362565036,
+ "acc_norm,none": 0.8359193864811842,
+ "acc_norm_stderr,none": 0.00345585439013916,
+ "exact_match,flexible-extract": 0.6315390447308568,
+ "exact_match_stderr,flexible-extract": 0.013287342651674573,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.6514042969190568,
+ "acc_stderr,none": 0.0027599234131481932,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "alias": "Open LLM Leaderboard"
+ },
+ "mmlu": {
+ "acc,none": 0.6364477994587665,
+ "acc_stderr,none": 0.0038271694585367516,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.5895855472901169,
+ "acc_stderr,none": 0.006760272274548804
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.7055037013196009,
+ "acc_stderr,none": 0.007845586852292294
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7432564185895353,
+ "acc_stderr,none": 0.007701333272557918
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5340945131620679,
+ "acc_stderr,none": 0.008514164103258936
+ },
+ "truthfulqa": {
+ "bleu_diff,none": 3.006407428437984,
+ "bleu_diff_stderr,none": 0.6902983955128965,
+ "bleu_max,none": 22.15536379157621,
+ "bleu_max_stderr,none": 0.7500703499499018,
+ "rouge2_max,none": 33.70863440140025,
+ "rouge2_max_stderr,none": 0.9588688279748141,
+ "rougeL_acc,none": 0.5128518971848225,
+ "rougeL_acc_stderr,none": 0.017497717944299843,
+ "rougeL_diff,none": 3.7826634155902985,
+ "rougeL_diff_stderr,none": 0.958164425989176,
+ "rouge1_acc,none": 0.5410036719706243,
+ "rouge1_acc_stderr,none": 0.017444544447661182,
+ "rouge1_max,none": 47.813085751155874,
+ "rouge1_max_stderr,none": 0.8351008973483007,
+ "rouge2_acc,none": 0.4541003671970624,
+ "rouge2_acc_stderr,none": 0.017429593091323504,
+ "bleu_acc,none": 0.5091799265605875,
+ "bleu_acc_stderr,none": 0.01750055072481974,
+ "rouge2_diff,none": 3.5225531232870635,
+ "rouge2_diff_stderr,none": 1.0505034205658943,
+ "rouge1_diff,none": 4.221115729669922,
+ "rouge1_diff_stderr,none": 0.9451610499576943,
+ "acc,none": 0.5182294709510535,
+ "acc_stderr,none": 0.011594047810301133,
+ "rougeL_max,none": 44.3329487904666,
+ "rougeL_max_stderr,none": 0.8588505055776223,
+ "alias": " - truthfulqa"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "truthfulqa": [
+ "truthfulqa_gen",
+ "truthfulqa_mc1",
+ "truthfulqa_mc2"
+ ],
+ "mmlu_stem": [
+ "mmlu_high_school_chemistry",
+ "mmlu_college_physics",
+ "mmlu_college_mathematics",
+ "mmlu_astronomy",
+ "mmlu_high_school_physics",
+ "mmlu_computer_security",
+ "mmlu_elementary_mathematics",
+ "mmlu_electrical_engineering",
+ "mmlu_college_biology",
+ "mmlu_machine_learning",
+ "mmlu_high_school_biology",
+ "mmlu_high_school_mathematics",
+ "mmlu_anatomy",
+ "mmlu_high_school_statistics",
+ "mmlu_college_chemistry",
+ "mmlu_conceptual_physics",
+ "mmlu_high_school_computer_science",
+ "mmlu_college_computer_science",
+ "mmlu_abstract_algebra"
+ ],
+ "mmlu_other": [
+ "mmlu_professional_medicine",
+ "mmlu_professional_accounting",
+ "mmlu_management",
+ "mmlu_global_facts",
+ "mmlu_college_medicine",
+ "mmlu_business_ethics",
+ "mmlu_nutrition",
+ "mmlu_medical_genetics",
+ "mmlu_virology",
+ "mmlu_human_aging",
+ "mmlu_clinical_knowledge",
+ "mmlu_miscellaneous",
+ "mmlu_marketing"
+ ],
+ "mmlu_social_sciences": [
+ "mmlu_high_school_psychology",
+ "mmlu_sociology",
+ "mmlu_high_school_government_and_politics",
+ "mmlu_public_relations",
+ "mmlu_high_school_macroeconomics",
+ "mmlu_high_school_geography",
+ "mmlu_high_school_microeconomics",
+ "mmlu_security_studies",
+ "mmlu_us_foreign_policy",
+ "mmlu_professional_psychology",
+ "mmlu_human_sexuality",
+ "mmlu_econometrics"
+ ],
+ "mmlu_humanities": [
+ "mmlu_high_school_european_history",
+ "mmlu_formal_logic",
+ "mmlu_moral_scenarios",
+ "mmlu_moral_disputes",
+ "mmlu_world_religions",
+ "mmlu_high_school_world_history",
+ "mmlu_logical_fallacies",
+ "mmlu_international_law",
+ "mmlu_philosophy",
+ "mmlu_professional_law",
+ "mmlu_high_school_us_history",
+ "mmlu_prehistory",
+ "mmlu_jurisprudence"
+ ],
+ "mmlu": [
+ "mmlu_humanities",
+ "mmlu_social_sciences",
+ "mmlu_other",
+ "mmlu_stem"
+ ],
+ "Open LLM Leaderboard": [
+ "gsm8k",
+ "winogrande",
+ "mmlu",
+ "truthfulqa",
+ "hellaswag",
+ "arc_challenge"
+ ]
+ },
+ "configs": {
+ "arc_challenge": {
+ "task": "arc_challenge",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "allenai/ai2_arc",
+ "dataset_name": "ARC-Challenge",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "fewshot_split": "validation",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 25,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "gsm8k": {
+ "task": "gsm8k",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "gsm8k",
+ "dataset_name": "main",
+ "training_split": "train",
+ "test_split": "test",
+ "fewshot_split": "train",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{answer}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "exact_match",
+ "aggregation": "mean",
+ "higher_is_better": true,
+ "ignore_case": true,
+ "ignore_punctuation": false,
+ "regexes_to_ignore": [
+ ",",
+ "\\$",
+ "(?s).*#### ",
+ "\\.$"
+ ]
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "Question:",
+ "",
+ "<|im_end|>"
+ ],
+ "do_sample": false,
+ "temperature": 0.0
+ },
+ "repeats": 1,
+ "filter_list": [
+ {
+ "name": "strict-match",
+ "filter": [
+ {
+ "function": "regex",
+ "regex_pattern": "#### (\\-?[0-9\\.\\,]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ },
+ {
+ "name": "flexible-extract",
+ "filter": [
+ {
+ "function": "regex",
+ "group_select": -1,
+ "regex_pattern": "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ }
+ ],
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 10,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mmlu_abstract_algebra": {
+ "task": "mmlu_abstract_algebra",
+ "task_alias": "abstract_algebra",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "abstract_algebra",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_anatomy": {
+ "task": "mmlu_anatomy",
+ "task_alias": "anatomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "anatomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_astronomy": {
+ "task": "mmlu_astronomy",
+ "task_alias": "astronomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "astronomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_business_ethics": {
+ "task": "mmlu_business_ethics",
+ "task_alias": "business_ethics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "business_ethics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_clinical_knowledge": {
+ "task": "mmlu_clinical_knowledge",
+ "task_alias": "clinical_knowledge",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "clinical_knowledge",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_biology": {
+ "task": "mmlu_college_biology",
+ "task_alias": "college_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_chemistry": {
+ "task": "mmlu_college_chemistry",
+ "task_alias": "college_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_computer_science": {
+ "task": "mmlu_college_computer_science",
+ "task_alias": "college_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_mathematics": {
+ "task": "mmlu_college_mathematics",
+ "task_alias": "college_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_medicine": {
+ "task": "mmlu_college_medicine",
+ "task_alias": "college_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_physics": {
+ "task": "mmlu_college_physics",
+ "task_alias": "college_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_computer_security": {
+ "task": "mmlu_computer_security",
+ "task_alias": "computer_security",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "computer_security",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about computer security.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_conceptual_physics": {
+ "task": "mmlu_conceptual_physics",
+ "task_alias": "conceptual_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "conceptual_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_econometrics": {
+ "task": "mmlu_econometrics",
+ "task_alias": "econometrics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "econometrics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_electrical_engineering": {
+ "task": "mmlu_electrical_engineering",
+ "task_alias": "electrical_engineering",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "electrical_engineering",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_elementary_mathematics": {
+ "task": "mmlu_elementary_mathematics",
+ "task_alias": "elementary_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "elementary_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_formal_logic": {
+ "task": "mmlu_formal_logic",
+ "task_alias": "formal_logic",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "formal_logic",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_global_facts": {
+ "task": "mmlu_global_facts",
+ "task_alias": "global_facts",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "global_facts",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_biology": {
+ "task": "mmlu_high_school_biology",
+ "task_alias": "high_school_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_chemistry": {
+ "task": "mmlu_high_school_chemistry",
+ "task_alias": "high_school_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_computer_science": {
+ "task": "mmlu_high_school_computer_science",
+ "task_alias": "high_school_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_european_history": {
+ "task": "mmlu_high_school_european_history",
+ "task_alias": "high_school_european_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_european_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_geography": {
+ "task": "mmlu_high_school_geography",
+ "task_alias": "high_school_geography",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_geography",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_government_and_politics": {
+ "task": "mmlu_high_school_government_and_politics",
+ "task_alias": "high_school_government_and_politics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_government_and_politics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_macroeconomics": {
+ "task": "mmlu_high_school_macroeconomics",
+ "task_alias": "high_school_macroeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_macroeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_mathematics": {
+ "task": "mmlu_high_school_mathematics",
+ "task_alias": "high_school_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_microeconomics": {
+ "task": "mmlu_high_school_microeconomics",
+ "task_alias": "high_school_microeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_microeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_physics": {
+ "task": "mmlu_high_school_physics",
+ "task_alias": "high_school_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_psychology": {
+ "task": "mmlu_high_school_psychology",
+ "task_alias": "high_school_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_statistics": {
+ "task": "mmlu_high_school_statistics",
+ "task_alias": "high_school_statistics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_statistics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_us_history": {
+ "task": "mmlu_high_school_us_history",
+ "task_alias": "high_school_us_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_us_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_world_history": {
+ "task": "mmlu_high_school_world_history",
+ "task_alias": "high_school_world_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_world_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_aging": {
+ "task": "mmlu_human_aging",
+ "task_alias": "human_aging",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_aging",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_sexuality": {
+ "task": "mmlu_human_sexuality",
+ "task_alias": "human_sexuality",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_sexuality",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_international_law": {
+ "task": "mmlu_international_law",
+ "task_alias": "international_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "international_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_jurisprudence": {
+ "task": "mmlu_jurisprudence",
+ "task_alias": "jurisprudence",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "jurisprudence",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_logical_fallacies": {
+ "task": "mmlu_logical_fallacies",
+ "task_alias": "logical_fallacies",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "logical_fallacies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_machine_learning": {
+ "task": "mmlu_machine_learning",
+ "task_alias": "machine_learning",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "machine_learning",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_management": {
+ "task": "mmlu_management",
+ "task_alias": "management",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "management",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_marketing": {
+ "task": "mmlu_marketing",
+ "task_alias": "marketing",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "marketing",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_medical_genetics": {
+ "task": "mmlu_medical_genetics",
+ "task_alias": "medical_genetics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "medical_genetics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_miscellaneous": {
+ "task": "mmlu_miscellaneous",
+ "task_alias": "miscellaneous",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "miscellaneous",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_disputes": {
+ "task": "mmlu_moral_disputes",
+ "task_alias": "moral_disputes",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_disputes",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_scenarios": {
+ "task": "mmlu_moral_scenarios",
+ "task_alias": "moral_scenarios",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_scenarios",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_nutrition": {
+ "task": "mmlu_nutrition",
+ "task_alias": "nutrition",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "nutrition",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_philosophy": {
+ "task": "mmlu_philosophy",
+ "task_alias": "philosophy",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "philosophy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_prehistory": {
+ "task": "mmlu_prehistory",
+ "task_alias": "prehistory",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "prehistory",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_accounting": {
+ "task": "mmlu_professional_accounting",
+ "task_alias": "professional_accounting",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_accounting",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_law": {
+ "task": "mmlu_professional_law",
+ "task_alias": "professional_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_medicine": {
+ "task": "mmlu_professional_medicine",
+ "task_alias": "professional_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_psychology": {
+ "task": "mmlu_professional_psychology",
+ "task_alias": "professional_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_public_relations": {
+ "task": "mmlu_public_relations",
+ "task_alias": "public_relations",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "public_relations",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_security_studies": {
+ "task": "mmlu_security_studies",
+ "task_alias": "security_studies",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "security_studies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_sociology": {
+ "task": "mmlu_sociology",
+ "task_alias": "sociology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "sociology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_us_foreign_policy": {
+ "task": "mmlu_us_foreign_policy",
+ "task_alias": "us_foreign_policy",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "us_foreign_policy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_virology": {
+ "task": "mmlu_virology",
+ "task_alias": "virology",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "virology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_world_religions": {
+ "task": "mmlu_world_religions",
+ "task_alias": "world_religions",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "world_religions",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "truthfulqa_gen": {
+ "task": "truthfulqa_gen",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "generation",
+ "validation_split": "validation",
+ "process_docs": "def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:\n return dataset.map(preprocess_function)\n",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question}}",
+ "doc_to_target": " ",
+ "process_results": "def process_results_gen(doc, results):\n completion = results[0]\n true_refs, false_refs = doc[\"correct_answers\"], doc[\"incorrect_answers\"]\n all_refs = true_refs + false_refs\n\n # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures.\n\n # # BLEURT\n # bleurt_scores_true = self.bleurt.compute(\n # predictions=[completion] * len(true_refs), references=true_refs\n # )[\"scores\"]\n # bleurt_scores_false = self.bleurt.compute(\n # predictions=[completion] * len(false_refs), references=false_refs\n # )[\"scores\"]\n # bleurt_correct = max(bleurt_scores_true)\n # bleurt_incorrect = max(bleurt_scores_false)\n # bleurt_max = bleurt_correct\n # bleurt_diff = bleurt_correct - bleurt_incorrect\n # bleurt_acc = int(bleurt_correct > bleurt_incorrect)\n\n # BLEU\n bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]\n bleu_correct = np.nanmax(bleu_scores[: len(true_refs)])\n bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :])\n bleu_max = bleu_correct\n bleu_diff = bleu_correct - bleu_incorrect\n bleu_acc = int(bleu_correct > bleu_incorrect)\n\n # ROUGE-N\n rouge_scores = [rouge([ref], [completion]) for ref in all_refs]\n # ROUGE-1\n rouge1_scores = [score[\"rouge1\"] for score in rouge_scores]\n rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)])\n rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :])\n rouge1_max = rouge1_correct\n rouge1_diff = rouge1_correct - rouge1_incorrect\n rouge1_acc = int(rouge1_correct > rouge1_incorrect)\n # ROUGE-2\n rouge2_scores = [score[\"rouge2\"] for score in rouge_scores]\n rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)])\n rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :])\n rouge2_max = rouge2_correct\n rouge2_diff = rouge2_correct - rouge2_incorrect\n rouge2_acc = int(rouge2_correct > rouge2_incorrect)\n # ROUGE-L\n rougeL_scores = [score[\"rougeLsum\"] for score in rouge_scores]\n rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)])\n rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :])\n rougeL_max = rougeL_correct\n rougeL_diff = rougeL_correct - rougeL_incorrect\n rougeL_acc = int(rougeL_correct > rougeL_incorrect)\n\n return {\n # \"bleurt_max\": bleurt_max,\n # \"bleurt_acc\": bleurt_acc,\n # \"bleurt_diff\": bleurt_diff,\n \"bleu_max\": bleu_max,\n \"bleu_acc\": bleu_acc,\n \"bleu_diff\": bleu_diff,\n \"rouge1_max\": rouge1_max,\n \"rouge1_acc\": rouge1_acc,\n \"rouge1_diff\": rouge1_diff,\n \"rouge2_max\": rouge2_max,\n \"rouge2_acc\": rouge2_acc,\n \"rouge2_diff\": rouge2_diff,\n \"rougeL_max\": rougeL_max,\n \"rougeL_acc\": rougeL_acc,\n \"rougeL_diff\": rougeL_diff,\n }\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "bleu_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "\n\n"
+ ],
+ "do_sample": false
+ },
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "truthfulqa_mc1": {
+ "task": "truthfulqa_mc1",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc1_targets.choices}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "truthfulqa_mc2": {
+ "task": "truthfulqa_mc2",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc2_targets.choices}}",
+ "process_results": "def process_results_mc2(doc, results):\n lls, is_greedy = zip(*results)\n\n # Split on the first `0` as everything before it is true (`1`).\n split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n # Compute the normalized probability mass for the correct answer.\n ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n p_true = p_true / (sum(p_true) + sum(p_false))\n\n return {\"acc\": sum(p_true)}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "winogrande": {
+ "task": "winogrande",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "winogrande",
+ "dataset_name": "winogrande_xl",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n",
+ "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n",
+ "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "sentence",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "arc_challenge": 1.0,
+ "eq_bench": 2.1,
+ "gsm8k": 3.0,
+ "hellaswag": 1.0,
+ "mmlu_abstract_algebra": 0.0,
+ "mmlu_anatomy": 0.0,
+ "mmlu_astronomy": 0.0,
+ "mmlu_business_ethics": 0.0,
+ "mmlu_clinical_knowledge": 0.0,
+ "mmlu_college_biology": 0.0,
+ "mmlu_college_chemistry": 0.0,
+ "mmlu_college_computer_science": 0.0,
+ "mmlu_college_mathematics": 0.0,
+ "mmlu_college_medicine": 0.0,
+ "mmlu_college_physics": 0.0,
+ "mmlu_computer_security": 0.0,
+ "mmlu_conceptual_physics": 0.0,
+ "mmlu_econometrics": 0.0,
+ "mmlu_electrical_engineering": 0.0,
+ "mmlu_elementary_mathematics": 0.0,
+ "mmlu_formal_logic": 0.0,
+ "mmlu_global_facts": 0.0,
+ "mmlu_high_school_biology": 0.0,
+ "mmlu_high_school_chemistry": 0.0,
+ "mmlu_high_school_computer_science": 0.0,
+ "mmlu_high_school_european_history": 0.0,
+ "mmlu_high_school_geography": 0.0,
+ "mmlu_high_school_government_and_politics": 0.0,
+ "mmlu_high_school_macroeconomics": 0.0,
+ "mmlu_high_school_mathematics": 0.0,
+ "mmlu_high_school_microeconomics": 0.0,
+ "mmlu_high_school_physics": 0.0,
+ "mmlu_high_school_psychology": 0.0,
+ "mmlu_high_school_statistics": 0.0,
+ "mmlu_high_school_us_history": 0.0,
+ "mmlu_high_school_world_history": 0.0,
+ "mmlu_human_aging": 0.0,
+ "mmlu_human_sexuality": 0.0,
+ "mmlu_international_law": 0.0,
+ "mmlu_jurisprudence": 0.0,
+ "mmlu_logical_fallacies": 0.0,
+ "mmlu_machine_learning": 0.0,
+ "mmlu_management": 0.0,
+ "mmlu_marketing": 0.0,
+ "mmlu_medical_genetics": 0.0,
+ "mmlu_miscellaneous": 0.0,
+ "mmlu_moral_disputes": 0.0,
+ "mmlu_moral_scenarios": 0.0,
+ "mmlu_nutrition": 0.0,
+ "mmlu_philosophy": 0.0,
+ "mmlu_prehistory": 0.0,
+ "mmlu_professional_accounting": 0.0,
+ "mmlu_professional_law": 0.0,
+ "mmlu_professional_medicine": 0.0,
+ "mmlu_professional_psychology": 0.0,
+ "mmlu_public_relations": 0.0,
+ "mmlu_security_studies": 0.0,
+ "mmlu_sociology": 0.0,
+ "mmlu_us_foreign_policy": 0.0,
+ "mmlu_virology": 0.0,
+ "mmlu_world_religions": 0.0,
+ "truthfulqa_gen": 3.0,
+ "truthfulqa_mc1": 2.0,
+ "truthfulqa_mc2": 2.0,
+ "winogrande": 1.0
+ },
+ "n-shot": {
+ "Open LLM Leaderboard": 5,
+ "arc_challenge": 25,
+ "eq_bench": 0,
+ "gsm8k": 5,
+ "hellaswag": 10,
+ "mmlu": 0,
+ "mmlu_abstract_algebra": 5,
+ "mmlu_anatomy": 5,
+ "mmlu_astronomy": 5,
+ "mmlu_business_ethics": 5,
+ "mmlu_clinical_knowledge": 5,
+ "mmlu_college_biology": 5,
+ "mmlu_college_chemistry": 5,
+ "mmlu_college_computer_science": 5,
+ "mmlu_college_mathematics": 5,
+ "mmlu_college_medicine": 5,
+ "mmlu_college_physics": 5,
+ "mmlu_computer_security": 5,
+ "mmlu_conceptual_physics": 5,
+ "mmlu_econometrics": 5,
+ "mmlu_electrical_engineering": 5,
+ "mmlu_elementary_mathematics": 5,
+ "mmlu_formal_logic": 5,
+ "mmlu_global_facts": 5,
+ "mmlu_high_school_biology": 5,
+ "mmlu_high_school_chemistry": 5,
+ "mmlu_high_school_computer_science": 5,
+ "mmlu_high_school_european_history": 5,
+ "mmlu_high_school_geography": 5,
+ "mmlu_high_school_government_and_politics": 5,
+ "mmlu_high_school_macroeconomics": 5,
+ "mmlu_high_school_mathematics": 5,
+ "mmlu_high_school_microeconomics": 5,
+ "mmlu_high_school_physics": 5,
+ "mmlu_high_school_psychology": 5,
+ "mmlu_high_school_statistics": 5,
+ "mmlu_high_school_us_history": 5,
+ "mmlu_high_school_world_history": 5,
+ "mmlu_human_aging": 5,
+ "mmlu_human_sexuality": 5,
+ "mmlu_humanities": 5,
+ "mmlu_international_law": 5,
+ "mmlu_jurisprudence": 5,
+ "mmlu_logical_fallacies": 5,
+ "mmlu_machine_learning": 5,
+ "mmlu_management": 5,
+ "mmlu_marketing": 5,
+ "mmlu_medical_genetics": 5,
+ "mmlu_miscellaneous": 5,
+ "mmlu_moral_disputes": 5,
+ "mmlu_moral_scenarios": 5,
+ "mmlu_nutrition": 5,
+ "mmlu_other": 5,
+ "mmlu_philosophy": 5,
+ "mmlu_prehistory": 5,
+ "mmlu_professional_accounting": 5,
+ "mmlu_professional_law": 5,
+ "mmlu_professional_medicine": 5,
+ "mmlu_professional_psychology": 5,
+ "mmlu_public_relations": 5,
+ "mmlu_security_studies": 5,
+ "mmlu_social_sciences": 5,
+ "mmlu_sociology": 5,
+ "mmlu_stem": 5,
+ "mmlu_us_foreign_policy": 5,
+ "mmlu_virology": 5,
+ "mmlu_world_religions": 5,
+ "truthfulqa": 0,
+ "truthfulqa_gen": 0,
+ "truthfulqa_mc1": 0,
+ "truthfulqa_mc2": 0,
+ "winogrande": 5
+ },
+ "higher_is_better": {
+ "Open LLM Leaderboard": {
+ "exact_match": true,
+ "acc": true,
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc_norm": true
+ },
+ "arc_challenge": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "gsm8k": {
+ "exact_match": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "mmlu": {
+ "acc": true
+ },
+ "mmlu_abstract_algebra": {
+ "acc": true
+ },
+ "mmlu_anatomy": {
+ "acc": true
+ },
+ "mmlu_astronomy": {
+ "acc": true
+ },
+ "mmlu_business_ethics": {
+ "acc": true
+ },
+ "mmlu_clinical_knowledge": {
+ "acc": true
+ },
+ "mmlu_college_biology": {
+ "acc": true
+ },
+ "mmlu_college_chemistry": {
+ "acc": true
+ },
+ "mmlu_college_computer_science": {
+ "acc": true
+ },
+ "mmlu_college_mathematics": {
+ "acc": true
+ },
+ "mmlu_college_medicine": {
+ "acc": true
+ },
+ "mmlu_college_physics": {
+ "acc": true
+ },
+ "mmlu_computer_security": {
+ "acc": true
+ },
+ "mmlu_conceptual_physics": {
+ "acc": true
+ },
+ "mmlu_econometrics": {
+ "acc": true
+ },
+ "mmlu_electrical_engineering": {
+ "acc": true
+ },
+ "mmlu_elementary_mathematics": {
+ "acc": true
+ },
+ "mmlu_formal_logic": {
+ "acc": true
+ },
+ "mmlu_global_facts": {
+ "acc": true
+ },
+ "mmlu_high_school_biology": {
+ "acc": true
+ },
+ "mmlu_high_school_chemistry": {
+ "acc": true
+ },
+ "mmlu_high_school_computer_science": {
+ "acc": true
+ },
+ "mmlu_high_school_european_history": {
+ "acc": true
+ },
+ "mmlu_high_school_geography": {
+ "acc": true
+ },
+ "mmlu_high_school_government_and_politics": {
+ "acc": true
+ },
+ "mmlu_high_school_macroeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_mathematics": {
+ "acc": true
+ },
+ "mmlu_high_school_microeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_physics": {
+ "acc": true
+ },
+ "mmlu_high_school_psychology": {
+ "acc": true
+ },
+ "mmlu_high_school_statistics": {
+ "acc": true
+ },
+ "mmlu_high_school_us_history": {
+ "acc": true
+ },
+ "mmlu_high_school_world_history": {
+ "acc": true
+ },
+ "mmlu_human_aging": {
+ "acc": true
+ },
+ "mmlu_human_sexuality": {
+ "acc": true
+ },
+ "mmlu_humanities": {
+ "acc": true
+ },
+ "mmlu_international_law": {
+ "acc": true
+ },
+ "mmlu_jurisprudence": {
+ "acc": true
+ },
+ "mmlu_logical_fallacies": {
+ "acc": true
+ },
+ "mmlu_machine_learning": {
+ "acc": true
+ },
+ "mmlu_management": {
+ "acc": true
+ },
+ "mmlu_marketing": {
+ "acc": true
+ },
+ "mmlu_medical_genetics": {
+ "acc": true
+ },
+ "mmlu_miscellaneous": {
+ "acc": true
+ },
+ "mmlu_moral_disputes": {
+ "acc": true
+ },
+ "mmlu_moral_scenarios": {
+ "acc": true
+ },
+ "mmlu_nutrition": {
+ "acc": true
+ },
+ "mmlu_other": {
+ "acc": true
+ },
+ "mmlu_philosophy": {
+ "acc": true
+ },
+ "mmlu_prehistory": {
+ "acc": true
+ },
+ "mmlu_professional_accounting": {
+ "acc": true
+ },
+ "mmlu_professional_law": {
+ "acc": true
+ },
+ "mmlu_professional_medicine": {
+ "acc": true
+ },
+ "mmlu_professional_psychology": {
+ "acc": true
+ },
+ "mmlu_public_relations": {
+ "acc": true
+ },
+ "mmlu_security_studies": {
+ "acc": true
+ },
+ "mmlu_social_sciences": {
+ "acc": true
+ },
+ "mmlu_sociology": {
+ "acc": true
+ },
+ "mmlu_stem": {
+ "acc": true
+ },
+ "mmlu_us_foreign_policy": {
+ "acc": true
+ },
+ "mmlu_virology": {
+ "acc": true
+ },
+ "mmlu_world_religions": {
+ "acc": true
+ },
+ "truthfulqa": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc": true
+ },
+ "truthfulqa_gen": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true
+ },
+ "truthfulqa_mc1": {
+ "acc": true
+ },
+ "truthfulqa_mc2": {
+ "acc": true
+ },
+ "winogrande": {
+ "acc": true
+ }
+ },
+ "n-samples": {
+ "gsm8k": {
+ "original": 1319,
+ "effective": 1319
+ },
+ "winogrande": {
+ "original": 1267,
+ "effective": 1267
+ },
+ "mmlu_high_school_european_history": {
+ "original": 165,
+ "effective": 165
+ },
+ "mmlu_formal_logic": {
+ "original": 126,
+ "effective": 126
+ },
+ "mmlu_moral_scenarios": {
+ "original": 895,
+ "effective": 895
+ },
+ "mmlu_moral_disputes": {
+ "original": 346,
+ "effective": 346
+ },
+ "mmlu_world_religions": {
+ "original": 171,
+ "effective": 171
+ },
+ "mmlu_high_school_world_history": {
+ "original": 237,
+ "effective": 237
+ },
+ "mmlu_logical_fallacies": {
+ "original": 163,
+ "effective": 163
+ },
+ "mmlu_international_law": {
+ "original": 121,
+ "effective": 121
+ },
+ "mmlu_philosophy": {
+ "original": 311,
+ "effective": 311
+ },
+ "mmlu_professional_law": {
+ "original": 1534,
+ "effective": 1534
+ },
+ "mmlu_high_school_us_history": {
+ "original": 204,
+ "effective": 204
+ },
+ "mmlu_prehistory": {
+ "original": 324,
+ "effective": 324
+ },
+ "mmlu_jurisprudence": {
+ "original": 108,
+ "effective": 108
+ },
+ "mmlu_high_school_psychology": {
+ "original": 545,
+ "effective": 545
+ },
+ "mmlu_sociology": {
+ "original": 201,
+ "effective": 201
+ },
+ "mmlu_high_school_government_and_politics": {
+ "original": 193,
+ "effective": 193
+ },
+ "mmlu_public_relations": {
+ "original": 110,
+ "effective": 110
+ },
+ "mmlu_high_school_macroeconomics": {
+ "original": 390,
+ "effective": 390
+ },
+ "mmlu_high_school_geography": {
+ "original": 198,
+ "effective": 198
+ },
+ "mmlu_high_school_microeconomics": {
+ "original": 238,
+ "effective": 238
+ },
+ "mmlu_security_studies": {
+ "original": 245,
+ "effective": 245
+ },
+ "mmlu_us_foreign_policy": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_professional_psychology": {
+ "original": 612,
+ "effective": 612
+ },
+ "mmlu_human_sexuality": {
+ "original": 131,
+ "effective": 131
+ },
+ "mmlu_econometrics": {
+ "original": 114,
+ "effective": 114
+ },
+ "mmlu_professional_medicine": {
+ "original": 272,
+ "effective": 272
+ },
+ "mmlu_professional_accounting": {
+ "original": 282,
+ "effective": 282
+ },
+ "mmlu_management": {
+ "original": 103,
+ "effective": 103
+ },
+ "mmlu_global_facts": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_medicine": {
+ "original": 173,
+ "effective": 173
+ },
+ "mmlu_business_ethics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_nutrition": {
+ "original": 306,
+ "effective": 306
+ },
+ "mmlu_medical_genetics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_virology": {
+ "original": 166,
+ "effective": 166
+ },
+ "mmlu_human_aging": {
+ "original": 223,
+ "effective": 223
+ },
+ "mmlu_clinical_knowledge": {
+ "original": 265,
+ "effective": 265
+ },
+ "mmlu_miscellaneous": {
+ "original": 783,
+ "effective": 783
+ },
+ "mmlu_marketing": {
+ "original": 234,
+ "effective": 234
+ },
+ "mmlu_high_school_chemistry": {
+ "original": 203,
+ "effective": 203
+ },
+ "mmlu_college_physics": {
+ "original": 102,
+ "effective": 102
+ },
+ "mmlu_college_mathematics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_astronomy": {
+ "original": 152,
+ "effective": 152
+ },
+ "mmlu_high_school_physics": {
+ "original": 151,
+ "effective": 151
+ },
+ "mmlu_computer_security": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_elementary_mathematics": {
+ "original": 378,
+ "effective": 378
+ },
+ "mmlu_electrical_engineering": {
+ "original": 145,
+ "effective": 145
+ },
+ "mmlu_college_biology": {
+ "original": 144,
+ "effective": 144
+ },
+ "mmlu_machine_learning": {
+ "original": 112,
+ "effective": 112
+ },
+ "mmlu_high_school_biology": {
+ "original": 310,
+ "effective": 310
+ },
+ "mmlu_high_school_mathematics": {
+ "original": 270,
+ "effective": 270
+ },
+ "mmlu_anatomy": {
+ "original": 135,
+ "effective": 135
+ },
+ "mmlu_high_school_statistics": {
+ "original": 216,
+ "effective": 216
+ },
+ "mmlu_college_chemistry": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_conceptual_physics": {
+ "original": 235,
+ "effective": 235
+ },
+ "mmlu_high_school_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_abstract_algebra": {
+ "original": 100,
+ "effective": 100
+ },
+ "truthfulqa_gen": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc1": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc2": {
+ "original": 817,
+ "effective": 817
+ },
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "arc_challenge": {
+ "original": 1172,
+ "effective": 1172
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Iced-Lemon-Cookie-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "e0656657a5d5cc73bc16d9852f5894f31ed7fcb5",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 2
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719586774.8240964,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Iced-Lemon-Cookie-7B",
+ "model_name_sanitized": "FallenMerick__Iced-Lemon-Cookie-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 139158.078961917,
+ "end_time": 175571.331573164,
+ "total_evaluation_time_seconds": "36413.25261124701"
+}
\ No newline at end of file
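
A minimal sketch, assuming Python 3 and the file layout shown in this diff, of how one might load a results_*.json checkpoint like the one above and print its headline metrics; the path below is illustrative, and this helper is not part of lm-evaluation-harness itself:

    # Hedged sketch: summarize an lm-eval-harness results checkpoint.
    # The path is hypothetical; point it at any results_*.json in this repo.
    import json

    path = "ABX-AI__Silver-Sun-v2-11B/results_2024-07-02T00-46-34.040470.json"
    with open(path) as f:
        data = json.load(f)

    # "results" maps each task or group name to its metric dict. Metric keys
    # carry a filter suffix (e.g. "acc,none", "exact_match,strict-match");
    # stderr entries and the "alias" label are skipped here.
    for task, metrics in sorted(data["results"].items()):
        for key, value in metrics.items():
            if key == "alias" or "_stderr," in key:
                continue
            if isinstance(value, (int, float)):
                print(f"{task:45s} {key:35s} {value:.4f}")
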
diff --git a/FallenMerick__Smart-Lemon-Cookie-7B/.ipynb_checkpoints/results_2024-06-28T14-56-07.716918-checkpoint.json b/FallenMerick__Smart-Lemon-Cookie-7B/.ipynb_checkpoints/results_2024-06-28T14-56-07.716918-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..9a2f104e8cba6d24182ceadad21a8f788548c78a
--- /dev/null
+++ b/FallenMerick__Smart-Lemon-Cookie-7B/.ipynb_checkpoints/results_2024-06-28T14-56-07.716918-checkpoint.json
@@ -0,0 +1,4035 @@
+{
+ "results": {
+ "Open LLM Leaderboard": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "exact_match,flexible-extract": 0.6573161485974223,
+ "exact_match_stderr,flexible-extract": 0.013073030230827912,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "exact_match,strict-match": 0.6497346474601972,
+ "exact_match_stderr,strict-match": 0.013140409455571269,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "acc_norm,none": 0.8349384697699305,
+ "acc_norm_stderr,none": 0.0034656728893589055,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "acc,none": 0.6524833304898358,
+ "acc_stderr,none": 0.002755144452920947,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": "Open LLM Leaderboard"
+ },
+ "arc_challenge": {
+ "acc,none": 0.6390784982935154,
+ "acc_stderr,none": 0.014034761386175458,
+ "acc_norm,none": 0.6706484641638225,
+ "acc_norm_stderr,none": 0.013734057652635473,
+ "alias": " - arc_challenge"
+ },
+ "gsm8k": {
+ "exact_match,strict-match": 0.6497346474601972,
+ "exact_match_stderr,strict-match": 0.013140409455571267,
+ "exact_match,flexible-extract": 0.6573161485974223,
+ "exact_match_stderr,flexible-extract": 0.013073030230827912,
+ "alias": " - gsm8k"
+ },
+ "hellaswag": {
+ "acc,none": 0.6714797849034057,
+ "acc_stderr,none": 0.00468715199479105,
+ "acc_norm,none": 0.8541127265484963,
+ "acc_norm_stderr,none": 0.0035227174995242872,
+ "alias": " - hellaswag"
+ },
+ "mmlu": {
+ "acc,none": 0.6370175188719556,
+ "acc_stderr,none": 0.0038187579064371084,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.593836344314559,
+ "acc_stderr,none": 0.006701956606258013
+ },
+ "mmlu_formal_logic": {
+ "alias": " - formal_logic",
+ "acc,none": 0.4523809523809524,
+ "acc_stderr,none": 0.044518079590553275
+ },
+ "mmlu_high_school_european_history": {
+ "alias": " - high_school_european_history",
+ "acc,none": 0.7757575757575758,
+ "acc_stderr,none": 0.03256866661681102
+ },
+ "mmlu_high_school_us_history": {
+ "alias": " - high_school_us_history",
+ "acc,none": 0.8480392156862745,
+ "acc_stderr,none": 0.025195658428931792
+ },
+ "mmlu_high_school_world_history": {
+ "alias": " - high_school_world_history",
+ "acc,none": 0.8227848101265823,
+ "acc_stderr,none": 0.024856364184503238
+ },
+ "mmlu_international_law": {
+ "alias": " - international_law",
+ "acc,none": 0.8181818181818182,
+ "acc_stderr,none": 0.03520893951097654
+ },
+ "mmlu_jurisprudence": {
+ "alias": " - jurisprudence",
+ "acc,none": 0.8240740740740741,
+ "acc_stderr,none": 0.036809181416738807
+ },
+ "mmlu_logical_fallacies": {
+ "alias": " - logical_fallacies",
+ "acc,none": 0.7852760736196319,
+ "acc_stderr,none": 0.03226219377286774
+ },
+ "mmlu_moral_disputes": {
+ "alias": " - moral_disputes",
+ "acc,none": 0.7225433526011561,
+ "acc_stderr,none": 0.024105712607754307
+ },
+ "mmlu_moral_scenarios": {
+ "alias": " - moral_scenarios",
+ "acc,none": 0.37206703910614525,
+ "acc_stderr,none": 0.016165847583563302
+ },
+ "mmlu_philosophy": {
+ "alias": " - philosophy",
+ "acc,none": 0.7170418006430869,
+ "acc_stderr,none": 0.02558306248998483
+ },
+ "mmlu_prehistory": {
+ "alias": " - prehistory",
+ "acc,none": 0.7345679012345679,
+ "acc_stderr,none": 0.02456922360046085
+ },
+ "mmlu_professional_law": {
+ "alias": " - professional_law",
+ "acc,none": 0.47979139504563234,
+ "acc_stderr,none": 0.01275980142776756
+ },
+ "mmlu_world_religions": {
+ "alias": " - world_religions",
+ "acc,none": 0.847953216374269,
+ "acc_stderr,none": 0.02753912288906145
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.702928870292887,
+ "acc_stderr,none": 0.007868349963426575
+ },
+ "mmlu_business_ethics": {
+ "alias": " - business_ethics",
+ "acc,none": 0.57,
+ "acc_stderr,none": 0.049756985195624284
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge",
+ "acc,none": 0.6943396226415094,
+ "acc_stderr,none": 0.028353298073322666
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine",
+ "acc,none": 0.6763005780346821,
+ "acc_stderr,none": 0.0356760379963917
+ },
+ "mmlu_global_facts": {
+ "alias": " - global_facts",
+ "acc,none": 0.32,
+ "acc_stderr,none": 0.046882617226215034
+ },
+ "mmlu_human_aging": {
+ "alias": " - human_aging",
+ "acc,none": 0.695067264573991,
+ "acc_stderr,none": 0.030898610882477518
+ },
+ "mmlu_management": {
+ "alias": " - management",
+ "acc,none": 0.7864077669902912,
+ "acc_stderr,none": 0.04058042015646034
+ },
+ "mmlu_marketing": {
+ "alias": " - marketing",
+ "acc,none": 0.8675213675213675,
+ "acc_stderr,none": 0.022209309073165616
+ },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics",
+ "acc,none": 0.7,
+ "acc_stderr,none": 0.046056618647183814
+ },
+ "mmlu_miscellaneous": {
+ "alias": " - miscellaneous",
+ "acc,none": 0.8275862068965517,
+ "acc_stderr,none": 0.013507943909371802
+ },
+ "mmlu_nutrition": {
+ "alias": " - nutrition",
+ "acc,none": 0.7320261437908496,
+ "acc_stderr,none": 0.025360603796242553
+ },
+ "mmlu_professional_accounting": {
+ "alias": " - professional_accounting",
+ "acc,none": 0.48936170212765956,
+ "acc_stderr,none": 0.029820747191422466
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine",
+ "acc,none": 0.6838235294117647,
+ "acc_stderr,none": 0.028245687391462913
+ },
+ "mmlu_virology": {
+ "alias": " - virology",
+ "acc,none": 0.536144578313253,
+ "acc_stderr,none": 0.03882310850890593
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7409814754631134,
+ "acc_stderr,none": 0.0077233871931608284
+ },
+ "mmlu_econometrics": {
+ "alias": " - econometrics",
+ "acc,none": 0.5087719298245614,
+ "acc_stderr,none": 0.04702880432049615
+ },
+ "mmlu_high_school_geography": {
+ "alias": " - high_school_geography",
+ "acc,none": 0.803030303030303,
+ "acc_stderr,none": 0.028335609732463362
+ },
+ "mmlu_high_school_government_and_politics": {
+ "alias": " - high_school_government_and_politics",
+ "acc,none": 0.8808290155440415,
+ "acc_stderr,none": 0.023381935348121427
+ },
+ "mmlu_high_school_macroeconomics": {
+ "alias": " - high_school_macroeconomics",
+ "acc,none": 0.6717948717948717,
+ "acc_stderr,none": 0.023807633198657266
+ },
+ "mmlu_high_school_microeconomics": {
+ "alias": " - high_school_microeconomics",
+ "acc,none": 0.7100840336134454,
+ "acc_stderr,none": 0.029472485833136094
+ },
+ "mmlu_high_school_psychology": {
+ "alias": " - high_school_psychology",
+ "acc,none": 0.8366972477064221,
+ "acc_stderr,none": 0.01584825580650152
+ },
+ "mmlu_human_sexuality": {
+ "alias": " - human_sexuality",
+ "acc,none": 0.7557251908396947,
+ "acc_stderr,none": 0.037683359597287434
+ },
+ "mmlu_professional_psychology": {
+ "alias": " - professional_psychology",
+ "acc,none": 0.6454248366013072,
+ "acc_stderr,none": 0.019353360547553693
+ },
+ "mmlu_public_relations": {
+ "alias": " - public_relations",
+ "acc,none": 0.6727272727272727,
+ "acc_stderr,none": 0.0449429086625209
+ },
+ "mmlu_security_studies": {
+ "alias": " - security_studies",
+ "acc,none": 0.7346938775510204,
+ "acc_stderr,none": 0.028263889943784603
+ },
+ "mmlu_sociology": {
+ "alias": " - sociology",
+ "acc,none": 0.8507462686567164,
+ "acc_stderr,none": 0.0251969298748271
+ },
+ "mmlu_us_foreign_policy": {
+ "alias": " - us_foreign_policy",
+ "acc,none": 0.87,
+ "acc_stderr,none": 0.03379976689896308
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5350459879479861,
+ "acc_stderr,none": 0.008502490762016599
+ },
+ "mmlu_abstract_algebra": {
+ "alias": " - abstract_algebra",
+ "acc,none": 0.35,
+ "acc_stderr,none": 0.0479372485441102
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy",
+ "acc,none": 0.6222222222222222,
+ "acc_stderr,none": 0.04188307537595853
+ },
+ "mmlu_astronomy": {
+ "alias": " - astronomy",
+ "acc,none": 0.6578947368421053,
+ "acc_stderr,none": 0.038607315993160904
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology",
+ "acc,none": 0.7638888888888888,
+ "acc_stderr,none": 0.03551446610810826
+ },
+ "mmlu_college_chemistry": {
+ "alias": " - college_chemistry",
+ "acc,none": 0.47,
+ "acc_stderr,none": 0.05016135580465919
+ },
+ "mmlu_college_computer_science": {
+ "alias": " - college_computer_science",
+ "acc,none": 0.54,
+ "acc_stderr,none": 0.05009082659620332
+ },
+ "mmlu_college_mathematics": {
+ "alias": " - college_mathematics",
+ "acc,none": 0.4,
+ "acc_stderr,none": 0.049236596391733084
+ },
+ "mmlu_college_physics": {
+ "alias": " - college_physics",
+ "acc,none": 0.4117647058823529,
+ "acc_stderr,none": 0.04897104952726366
+ },
+ "mmlu_computer_security": {
+ "alias": " - computer_security",
+ "acc,none": 0.8,
+ "acc_stderr,none": 0.04020151261036846
+ },
+ "mmlu_conceptual_physics": {
+ "alias": " - conceptual_physics",
+ "acc,none": 0.5787234042553191,
+ "acc_stderr,none": 0.03227834510146268
+ },
+ "mmlu_electrical_engineering": {
+ "alias": " - electrical_engineering",
+ "acc,none": 0.5793103448275863,
+ "acc_stderr,none": 0.04113914981189261
+ },
+ "mmlu_elementary_mathematics": {
+ "alias": " - elementary_mathematics",
+ "acc,none": 0.3968253968253968,
+ "acc_stderr,none": 0.025197101074246483
+ },
+ "mmlu_high_school_biology": {
+ "alias": " - high_school_biology",
+ "acc,none": 0.7774193548387097,
+ "acc_stderr,none": 0.023664216671642525
+ },
+ "mmlu_high_school_chemistry": {
+ "alias": " - high_school_chemistry",
+ "acc,none": 0.5073891625615764,
+ "acc_stderr,none": 0.0351760354036101
+ },
+ "mmlu_high_school_computer_science": {
+ "alias": " - high_school_computer_science",
+ "acc,none": 0.72,
+ "acc_stderr,none": 0.045126085985421276
+ },
+ "mmlu_high_school_mathematics": {
+ "alias": " - high_school_mathematics",
+ "acc,none": 0.34814814814814815,
+ "acc_stderr,none": 0.029045600290616258
+ },
+ "mmlu_high_school_physics": {
+ "alias": " - high_school_physics",
+ "acc,none": 0.3509933774834437,
+ "acc_stderr,none": 0.03896981964257375
+ },
+ "mmlu_high_school_statistics": {
+ "alias": " - high_school_statistics",
+ "acc,none": 0.5046296296296297,
+ "acc_stderr,none": 0.03409825519163572
+ },
+ "mmlu_machine_learning": {
+ "alias": " - machine_learning",
+ "acc,none": 0.4732142857142857,
+ "acc_stderr,none": 0.047389751192741546
+ },
+ "truthfulqa": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "acc,none": 0.5163944376892423,
+ "acc_stderr,none": 0.011629460414206856,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": " - truthfulqa"
+ },
+ "truthfulqa_gen": {
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374931,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799713,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": " - truthfulqa_gen"
+ },
+ "truthfulqa_mc1": {
+ "acc,none": 0.4320685434516524,
+ "acc_stderr,none": 0.01734120239498826,
+ "alias": " - truthfulqa_mc1"
+ },
+ "truthfulqa_mc2": {
+ "acc,none": 0.6007203319268323,
+ "acc_stderr,none": 0.015500325725560432,
+ "alias": " - truthfulqa_mc2"
+ },
+ "winogrande": {
+ "acc,none": 0.7734806629834254,
+ "acc_stderr,none": 0.01176414905469832,
+ "alias": " - winogrande"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.12395548919517,
+ "eqbench_stderr,none": 2.1553076487761045,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "groups": {
+ "Open LLM Leaderboard": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "exact_match,flexible-extract": 0.6573161485974223,
+ "exact_match_stderr,flexible-extract": 0.013073030230827912,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "exact_match,strict-match": 0.6497346474601972,
+ "exact_match_stderr,strict-match": 0.013140409455571269,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "acc_norm,none": 0.8349384697699305,
+ "acc_norm_stderr,none": 0.0034656728893589055,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "acc,none": 0.6524833304898358,
+ "acc_stderr,none": 0.002755144452920947,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": "Open LLM Leaderboard"
+ },
+ "mmlu": {
+ "acc,none": 0.6370175188719556,
+ "acc_stderr,none": 0.0038187579064371084,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.593836344314559,
+ "acc_stderr,none": 0.006701956606258013
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.702928870292887,
+ "acc_stderr,none": 0.007868349963426575
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7409814754631134,
+ "acc_stderr,none": 0.0077233871931608284
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5350459879479861,
+ "acc_stderr,none": 0.008502490762016599
+ },
+ "truthfulqa": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "acc,none": 0.5163944376892423,
+ "acc_stderr,none": 0.011629460414206856,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": " - truthfulqa"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "truthfulqa": [
+ "truthfulqa_gen",
+ "truthfulqa_mc1",
+ "truthfulqa_mc2"
+ ],
+ "mmlu_stem": [
+ "mmlu_high_school_chemistry",
+ "mmlu_college_physics",
+ "mmlu_college_mathematics",
+ "mmlu_astronomy",
+ "mmlu_high_school_physics",
+ "mmlu_computer_security",
+ "mmlu_elementary_mathematics",
+ "mmlu_electrical_engineering",
+ "mmlu_college_biology",
+ "mmlu_machine_learning",
+ "mmlu_high_school_biology",
+ "mmlu_high_school_mathematics",
+ "mmlu_anatomy",
+ "mmlu_high_school_statistics",
+ "mmlu_college_chemistry",
+ "mmlu_conceptual_physics",
+ "mmlu_high_school_computer_science",
+ "mmlu_college_computer_science",
+ "mmlu_abstract_algebra"
+ ],
+ "mmlu_other": [
+ "mmlu_professional_medicine",
+ "mmlu_professional_accounting",
+ "mmlu_management",
+ "mmlu_global_facts",
+ "mmlu_college_medicine",
+ "mmlu_business_ethics",
+ "mmlu_nutrition",
+ "mmlu_medical_genetics",
+ "mmlu_virology",
+ "mmlu_human_aging",
+ "mmlu_clinical_knowledge",
+ "mmlu_miscellaneous",
+ "mmlu_marketing"
+ ],
+ "mmlu_social_sciences": [
+ "mmlu_high_school_psychology",
+ "mmlu_sociology",
+ "mmlu_high_school_government_and_politics",
+ "mmlu_public_relations",
+ "mmlu_high_school_macroeconomics",
+ "mmlu_high_school_geography",
+ "mmlu_high_school_microeconomics",
+ "mmlu_security_studies",
+ "mmlu_us_foreign_policy",
+ "mmlu_professional_psychology",
+ "mmlu_human_sexuality",
+ "mmlu_econometrics"
+ ],
+ "mmlu_humanities": [
+ "mmlu_high_school_european_history",
+ "mmlu_formal_logic",
+ "mmlu_moral_scenarios",
+ "mmlu_moral_disputes",
+ "mmlu_world_religions",
+ "mmlu_high_school_world_history",
+ "mmlu_logical_fallacies",
+ "mmlu_international_law",
+ "mmlu_philosophy",
+ "mmlu_professional_law",
+ "mmlu_high_school_us_history",
+ "mmlu_prehistory",
+ "mmlu_jurisprudence"
+ ],
+ "mmlu": [
+ "mmlu_humanities",
+ "mmlu_social_sciences",
+ "mmlu_other",
+ "mmlu_stem"
+ ],
+ "Open LLM Leaderboard": [
+ "gsm8k",
+ "winogrande",
+ "mmlu",
+ "truthfulqa",
+ "hellaswag",
+ "arc_challenge"
+ ]
+ },
+ "configs": {
+ "arc_challenge": {
+ "task": "arc_challenge",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "allenai/ai2_arc",
+ "dataset_name": "ARC-Challenge",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "fewshot_split": "validation",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 25,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "gsm8k": {
+ "task": "gsm8k",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "gsm8k",
+ "dataset_name": "main",
+ "training_split": "train",
+ "test_split": "test",
+ "fewshot_split": "train",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{answer}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "exact_match",
+ "aggregation": "mean",
+ "higher_is_better": true,
+ "ignore_case": true,
+ "ignore_punctuation": false,
+ "regexes_to_ignore": [
+ ",",
+ "\\$",
+ "(?s).*#### ",
+ "\\.$"
+ ]
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "Question:",
+ "",
+ "<|im_end|>"
+ ],
+ "do_sample": false,
+ "temperature": 0.0
+ },
+ "repeats": 1,
+ "filter_list": [
+ {
+ "name": "strict-match",
+ "filter": [
+ {
+ "function": "regex",
+ "regex_pattern": "#### (\\-?[0-9\\.\\,]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ },
+ {
+ "name": "flexible-extract",
+ "filter": [
+ {
+ "function": "regex",
+ "group_select": -1,
+ "regex_pattern": "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ }
+ ],
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 10,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mmlu_abstract_algebra": {
+ "task": "mmlu_abstract_algebra",
+ "task_alias": "abstract_algebra",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "abstract_algebra",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_anatomy": {
+ "task": "mmlu_anatomy",
+ "task_alias": "anatomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "anatomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_astronomy": {
+ "task": "mmlu_astronomy",
+ "task_alias": "astronomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "astronomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_business_ethics": {
+ "task": "mmlu_business_ethics",
+ "task_alias": "business_ethics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "business_ethics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_clinical_knowledge": {
+ "task": "mmlu_clinical_knowledge",
+ "task_alias": "clinical_knowledge",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "clinical_knowledge",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_biology": {
+ "task": "mmlu_college_biology",
+ "task_alias": "college_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_chemistry": {
+ "task": "mmlu_college_chemistry",
+ "task_alias": "college_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_computer_science": {
+ "task": "mmlu_college_computer_science",
+ "task_alias": "college_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_mathematics": {
+ "task": "mmlu_college_mathematics",
+ "task_alias": "college_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_medicine": {
+ "task": "mmlu_college_medicine",
+ "task_alias": "college_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_physics": {
+ "task": "mmlu_college_physics",
+ "task_alias": "college_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_computer_security": {
+ "task": "mmlu_computer_security",
+ "task_alias": "computer_security",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "computer_security",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about computer security.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_conceptual_physics": {
+ "task": "mmlu_conceptual_physics",
+ "task_alias": "conceptual_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "conceptual_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_econometrics": {
+ "task": "mmlu_econometrics",
+ "task_alias": "econometrics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "econometrics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_electrical_engineering": {
+ "task": "mmlu_electrical_engineering",
+ "task_alias": "electrical_engineering",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "electrical_engineering",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_elementary_mathematics": {
+ "task": "mmlu_elementary_mathematics",
+ "task_alias": "elementary_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "elementary_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_formal_logic": {
+ "task": "mmlu_formal_logic",
+ "task_alias": "formal_logic",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "formal_logic",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_global_facts": {
+ "task": "mmlu_global_facts",
+ "task_alias": "global_facts",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "global_facts",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_biology": {
+ "task": "mmlu_high_school_biology",
+ "task_alias": "high_school_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_chemistry": {
+ "task": "mmlu_high_school_chemistry",
+ "task_alias": "high_school_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_computer_science": {
+ "task": "mmlu_high_school_computer_science",
+ "task_alias": "high_school_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_european_history": {
+ "task": "mmlu_high_school_european_history",
+ "task_alias": "high_school_european_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_european_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_geography": {
+ "task": "mmlu_high_school_geography",
+ "task_alias": "high_school_geography",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_geography",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_government_and_politics": {
+ "task": "mmlu_high_school_government_and_politics",
+ "task_alias": "high_school_government_and_politics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_government_and_politics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_macroeconomics": {
+ "task": "mmlu_high_school_macroeconomics",
+ "task_alias": "high_school_macroeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_macroeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_mathematics": {
+ "task": "mmlu_high_school_mathematics",
+ "task_alias": "high_school_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_microeconomics": {
+ "task": "mmlu_high_school_microeconomics",
+ "task_alias": "high_school_microeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_microeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_physics": {
+ "task": "mmlu_high_school_physics",
+ "task_alias": "high_school_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_psychology": {
+ "task": "mmlu_high_school_psychology",
+ "task_alias": "high_school_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_statistics": {
+ "task": "mmlu_high_school_statistics",
+ "task_alias": "high_school_statistics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_statistics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_us_history": {
+ "task": "mmlu_high_school_us_history",
+ "task_alias": "high_school_us_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_us_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_world_history": {
+ "task": "mmlu_high_school_world_history",
+ "task_alias": "high_school_world_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_world_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_aging": {
+ "task": "mmlu_human_aging",
+ "task_alias": "human_aging",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_aging",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_sexuality": {
+ "task": "mmlu_human_sexuality",
+ "task_alias": "human_sexuality",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_sexuality",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_international_law": {
+ "task": "mmlu_international_law",
+ "task_alias": "international_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "international_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_jurisprudence": {
+ "task": "mmlu_jurisprudence",
+ "task_alias": "jurisprudence",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "jurisprudence",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_logical_fallacies": {
+ "task": "mmlu_logical_fallacies",
+ "task_alias": "logical_fallacies",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "logical_fallacies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_machine_learning": {
+ "task": "mmlu_machine_learning",
+ "task_alias": "machine_learning",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "machine_learning",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_management": {
+ "task": "mmlu_management",
+ "task_alias": "management",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "management",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_marketing": {
+ "task": "mmlu_marketing",
+ "task_alias": "marketing",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "marketing",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_medical_genetics": {
+ "task": "mmlu_medical_genetics",
+ "task_alias": "medical_genetics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "medical_genetics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_miscellaneous": {
+ "task": "mmlu_miscellaneous",
+ "task_alias": "miscellaneous",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "miscellaneous",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_disputes": {
+ "task": "mmlu_moral_disputes",
+ "task_alias": "moral_disputes",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_disputes",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_scenarios": {
+ "task": "mmlu_moral_scenarios",
+ "task_alias": "moral_scenarios",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_scenarios",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_nutrition": {
+ "task": "mmlu_nutrition",
+ "task_alias": "nutrition",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "nutrition",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_philosophy": {
+ "task": "mmlu_philosophy",
+ "task_alias": "philosophy",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "philosophy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_prehistory": {
+ "task": "mmlu_prehistory",
+ "task_alias": "prehistory",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "prehistory",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_accounting": {
+ "task": "mmlu_professional_accounting",
+ "task_alias": "professional_accounting",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_accounting",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_law": {
+ "task": "mmlu_professional_law",
+ "task_alias": "professional_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_medicine": {
+ "task": "mmlu_professional_medicine",
+ "task_alias": "professional_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_psychology": {
+ "task": "mmlu_professional_psychology",
+ "task_alias": "professional_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_public_relations": {
+ "task": "mmlu_public_relations",
+ "task_alias": "public_relations",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "public_relations",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_security_studies": {
+ "task": "mmlu_security_studies",
+ "task_alias": "security_studies",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "security_studies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_sociology": {
+ "task": "mmlu_sociology",
+ "task_alias": "sociology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "sociology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_us_foreign_policy": {
+ "task": "mmlu_us_foreign_policy",
+ "task_alias": "us_foreign_policy",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "us_foreign_policy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_virology": {
+ "task": "mmlu_virology",
+ "task_alias": "virology",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "virology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_world_religions": {
+ "task": "mmlu_world_religions",
+ "task_alias": "world_religions",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "world_religions",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "truthfulqa_gen": {
+ "task": "truthfulqa_gen",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "generation",
+ "validation_split": "validation",
+ "process_docs": "def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:\n return dataset.map(preprocess_function)\n",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question}}",
+ "doc_to_target": " ",
+ "process_results": "def process_results_gen(doc, results):\n completion = results[0]\n true_refs, false_refs = doc[\"correct_answers\"], doc[\"incorrect_answers\"]\n all_refs = true_refs + false_refs\n\n # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures.\n\n # # BLEURT\n # bleurt_scores_true = self.bleurt.compute(\n # predictions=[completion] * len(true_refs), references=true_refs\n # )[\"scores\"]\n # bleurt_scores_false = self.bleurt.compute(\n # predictions=[completion] * len(false_refs), references=false_refs\n # )[\"scores\"]\n # bleurt_correct = max(bleurt_scores_true)\n # bleurt_incorrect = max(bleurt_scores_false)\n # bleurt_max = bleurt_correct\n # bleurt_diff = bleurt_correct - bleurt_incorrect\n # bleurt_acc = int(bleurt_correct > bleurt_incorrect)\n\n # BLEU\n bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]\n bleu_correct = np.nanmax(bleu_scores[: len(true_refs)])\n bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :])\n bleu_max = bleu_correct\n bleu_diff = bleu_correct - bleu_incorrect\n bleu_acc = int(bleu_correct > bleu_incorrect)\n\n # ROUGE-N\n rouge_scores = [rouge([ref], [completion]) for ref in all_refs]\n # ROUGE-1\n rouge1_scores = [score[\"rouge1\"] for score in rouge_scores]\n rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)])\n rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :])\n rouge1_max = rouge1_correct\n rouge1_diff = rouge1_correct - rouge1_incorrect\n rouge1_acc = int(rouge1_correct > rouge1_incorrect)\n # ROUGE-2\n rouge2_scores = [score[\"rouge2\"] for score in rouge_scores]\n rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)])\n rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :])\n rouge2_max = rouge2_correct\n rouge2_diff = rouge2_correct - rouge2_incorrect\n rouge2_acc = int(rouge2_correct > rouge2_incorrect)\n # ROUGE-L\n rougeL_scores = [score[\"rougeLsum\"] for score in rouge_scores]\n rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)])\n rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :])\n rougeL_max = rougeL_correct\n rougeL_diff = rougeL_correct - rougeL_incorrect\n rougeL_acc = int(rougeL_correct > rougeL_incorrect)\n\n return {\n # \"bleurt_max\": bleurt_max,\n # \"bleurt_acc\": bleurt_acc,\n # \"bleurt_diff\": bleurt_diff,\n \"bleu_max\": bleu_max,\n \"bleu_acc\": bleu_acc,\n \"bleu_diff\": bleu_diff,\n \"rouge1_max\": rouge1_max,\n \"rouge1_acc\": rouge1_acc,\n \"rouge1_diff\": rouge1_diff,\n \"rouge2_max\": rouge2_max,\n \"rouge2_acc\": rouge2_acc,\n \"rouge2_diff\": rouge2_diff,\n \"rougeL_max\": rougeL_max,\n \"rougeL_acc\": rougeL_acc,\n \"rougeL_diff\": rougeL_diff,\n }\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "bleu_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "\n\n"
+ ],
+ "do_sample": false
+ },
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "truthfulqa_mc1": {
+ "task": "truthfulqa_mc1",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc1_targets.choices}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "truthfulqa_mc2": {
+ "task": "truthfulqa_mc2",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc2_targets.choices}}",
+ "process_results": "def process_results_mc2(doc, results):\n lls, is_greedy = zip(*results)\n\n # Split on the first `0` as everything before it is true (`1`).\n split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n # Compute the normalized probability mass for the correct answer.\n ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n p_true = p_true / (sum(p_true) + sum(p_false))\n\n return {\"acc\": sum(p_true)}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "winogrande": {
+ "task": "winogrande",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "winogrande",
+ "dataset_name": "winogrande_xl",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n",
+ "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n",
+ "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "sentence",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "arc_challenge": 1.0,
+ "eq_bench": 2.1,
+ "gsm8k": 3.0,
+ "hellaswag": 1.0,
+ "mmlu_abstract_algebra": 0.0,
+ "mmlu_anatomy": 0.0,
+ "mmlu_astronomy": 0.0,
+ "mmlu_business_ethics": 0.0,
+ "mmlu_clinical_knowledge": 0.0,
+ "mmlu_college_biology": 0.0,
+ "mmlu_college_chemistry": 0.0,
+ "mmlu_college_computer_science": 0.0,
+ "mmlu_college_mathematics": 0.0,
+ "mmlu_college_medicine": 0.0,
+ "mmlu_college_physics": 0.0,
+ "mmlu_computer_security": 0.0,
+ "mmlu_conceptual_physics": 0.0,
+ "mmlu_econometrics": 0.0,
+ "mmlu_electrical_engineering": 0.0,
+ "mmlu_elementary_mathematics": 0.0,
+ "mmlu_formal_logic": 0.0,
+ "mmlu_global_facts": 0.0,
+ "mmlu_high_school_biology": 0.0,
+ "mmlu_high_school_chemistry": 0.0,
+ "mmlu_high_school_computer_science": 0.0,
+ "mmlu_high_school_european_history": 0.0,
+ "mmlu_high_school_geography": 0.0,
+ "mmlu_high_school_government_and_politics": 0.0,
+ "mmlu_high_school_macroeconomics": 0.0,
+ "mmlu_high_school_mathematics": 0.0,
+ "mmlu_high_school_microeconomics": 0.0,
+ "mmlu_high_school_physics": 0.0,
+ "mmlu_high_school_psychology": 0.0,
+ "mmlu_high_school_statistics": 0.0,
+ "mmlu_high_school_us_history": 0.0,
+ "mmlu_high_school_world_history": 0.0,
+ "mmlu_human_aging": 0.0,
+ "mmlu_human_sexuality": 0.0,
+ "mmlu_international_law": 0.0,
+ "mmlu_jurisprudence": 0.0,
+ "mmlu_logical_fallacies": 0.0,
+ "mmlu_machine_learning": 0.0,
+ "mmlu_management": 0.0,
+ "mmlu_marketing": 0.0,
+ "mmlu_medical_genetics": 0.0,
+ "mmlu_miscellaneous": 0.0,
+ "mmlu_moral_disputes": 0.0,
+ "mmlu_moral_scenarios": 0.0,
+ "mmlu_nutrition": 0.0,
+ "mmlu_philosophy": 0.0,
+ "mmlu_prehistory": 0.0,
+ "mmlu_professional_accounting": 0.0,
+ "mmlu_professional_law": 0.0,
+ "mmlu_professional_medicine": 0.0,
+ "mmlu_professional_psychology": 0.0,
+ "mmlu_public_relations": 0.0,
+ "mmlu_security_studies": 0.0,
+ "mmlu_sociology": 0.0,
+ "mmlu_us_foreign_policy": 0.0,
+ "mmlu_virology": 0.0,
+ "mmlu_world_religions": 0.0,
+ "truthfulqa_gen": 3.0,
+ "truthfulqa_mc1": 2.0,
+ "truthfulqa_mc2": 2.0,
+ "winogrande": 1.0
+ },
+ "n-shot": {
+ "Open LLM Leaderboard": 5,
+ "arc_challenge": 25,
+ "eq_bench": 0,
+ "gsm8k": 5,
+ "hellaswag": 10,
+ "mmlu": 0,
+ "mmlu_abstract_algebra": 5,
+ "mmlu_anatomy": 5,
+ "mmlu_astronomy": 5,
+ "mmlu_business_ethics": 5,
+ "mmlu_clinical_knowledge": 5,
+ "mmlu_college_biology": 5,
+ "mmlu_college_chemistry": 5,
+ "mmlu_college_computer_science": 5,
+ "mmlu_college_mathematics": 5,
+ "mmlu_college_medicine": 5,
+ "mmlu_college_physics": 5,
+ "mmlu_computer_security": 5,
+ "mmlu_conceptual_physics": 5,
+ "mmlu_econometrics": 5,
+ "mmlu_electrical_engineering": 5,
+ "mmlu_elementary_mathematics": 5,
+ "mmlu_formal_logic": 5,
+ "mmlu_global_facts": 5,
+ "mmlu_high_school_biology": 5,
+ "mmlu_high_school_chemistry": 5,
+ "mmlu_high_school_computer_science": 5,
+ "mmlu_high_school_european_history": 5,
+ "mmlu_high_school_geography": 5,
+ "mmlu_high_school_government_and_politics": 5,
+ "mmlu_high_school_macroeconomics": 5,
+ "mmlu_high_school_mathematics": 5,
+ "mmlu_high_school_microeconomics": 5,
+ "mmlu_high_school_physics": 5,
+ "mmlu_high_school_psychology": 5,
+ "mmlu_high_school_statistics": 5,
+ "mmlu_high_school_us_history": 5,
+ "mmlu_high_school_world_history": 5,
+ "mmlu_human_aging": 5,
+ "mmlu_human_sexuality": 5,
+ "mmlu_humanities": 5,
+ "mmlu_international_law": 5,
+ "mmlu_jurisprudence": 5,
+ "mmlu_logical_fallacies": 5,
+ "mmlu_machine_learning": 5,
+ "mmlu_management": 5,
+ "mmlu_marketing": 5,
+ "mmlu_medical_genetics": 5,
+ "mmlu_miscellaneous": 5,
+ "mmlu_moral_disputes": 5,
+ "mmlu_moral_scenarios": 5,
+ "mmlu_nutrition": 5,
+ "mmlu_other": 5,
+ "mmlu_philosophy": 5,
+ "mmlu_prehistory": 5,
+ "mmlu_professional_accounting": 5,
+ "mmlu_professional_law": 5,
+ "mmlu_professional_medicine": 5,
+ "mmlu_professional_psychology": 5,
+ "mmlu_public_relations": 5,
+ "mmlu_security_studies": 5,
+ "mmlu_social_sciences": 5,
+ "mmlu_sociology": 5,
+ "mmlu_stem": 5,
+ "mmlu_us_foreign_policy": 5,
+ "mmlu_virology": 5,
+ "mmlu_world_religions": 5,
+ "truthfulqa": 0,
+ "truthfulqa_gen": 0,
+ "truthfulqa_mc1": 0,
+ "truthfulqa_mc2": 0,
+ "winogrande": 5
+ },
+ "higher_is_better": {
+ "Open LLM Leaderboard": {
+ "exact_match": true,
+ "acc": true,
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc_norm": true
+ },
+ "arc_challenge": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "gsm8k": {
+ "exact_match": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "mmlu": {
+ "acc": true
+ },
+ "mmlu_abstract_algebra": {
+ "acc": true
+ },
+ "mmlu_anatomy": {
+ "acc": true
+ },
+ "mmlu_astronomy": {
+ "acc": true
+ },
+ "mmlu_business_ethics": {
+ "acc": true
+ },
+ "mmlu_clinical_knowledge": {
+ "acc": true
+ },
+ "mmlu_college_biology": {
+ "acc": true
+ },
+ "mmlu_college_chemistry": {
+ "acc": true
+ },
+ "mmlu_college_computer_science": {
+ "acc": true
+ },
+ "mmlu_college_mathematics": {
+ "acc": true
+ },
+ "mmlu_college_medicine": {
+ "acc": true
+ },
+ "mmlu_college_physics": {
+ "acc": true
+ },
+ "mmlu_computer_security": {
+ "acc": true
+ },
+ "mmlu_conceptual_physics": {
+ "acc": true
+ },
+ "mmlu_econometrics": {
+ "acc": true
+ },
+ "mmlu_electrical_engineering": {
+ "acc": true
+ },
+ "mmlu_elementary_mathematics": {
+ "acc": true
+ },
+ "mmlu_formal_logic": {
+ "acc": true
+ },
+ "mmlu_global_facts": {
+ "acc": true
+ },
+ "mmlu_high_school_biology": {
+ "acc": true
+ },
+ "mmlu_high_school_chemistry": {
+ "acc": true
+ },
+ "mmlu_high_school_computer_science": {
+ "acc": true
+ },
+ "mmlu_high_school_european_history": {
+ "acc": true
+ },
+ "mmlu_high_school_geography": {
+ "acc": true
+ },
+ "mmlu_high_school_government_and_politics": {
+ "acc": true
+ },
+ "mmlu_high_school_macroeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_mathematics": {
+ "acc": true
+ },
+ "mmlu_high_school_microeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_physics": {
+ "acc": true
+ },
+ "mmlu_high_school_psychology": {
+ "acc": true
+ },
+ "mmlu_high_school_statistics": {
+ "acc": true
+ },
+ "mmlu_high_school_us_history": {
+ "acc": true
+ },
+ "mmlu_high_school_world_history": {
+ "acc": true
+ },
+ "mmlu_human_aging": {
+ "acc": true
+ },
+ "mmlu_human_sexuality": {
+ "acc": true
+ },
+ "mmlu_humanities": {
+ "acc": true
+ },
+ "mmlu_international_law": {
+ "acc": true
+ },
+ "mmlu_jurisprudence": {
+ "acc": true
+ },
+ "mmlu_logical_fallacies": {
+ "acc": true
+ },
+ "mmlu_machine_learning": {
+ "acc": true
+ },
+ "mmlu_management": {
+ "acc": true
+ },
+ "mmlu_marketing": {
+ "acc": true
+ },
+ "mmlu_medical_genetics": {
+ "acc": true
+ },
+ "mmlu_miscellaneous": {
+ "acc": true
+ },
+ "mmlu_moral_disputes": {
+ "acc": true
+ },
+ "mmlu_moral_scenarios": {
+ "acc": true
+ },
+ "mmlu_nutrition": {
+ "acc": true
+ },
+ "mmlu_other": {
+ "acc": true
+ },
+ "mmlu_philosophy": {
+ "acc": true
+ },
+ "mmlu_prehistory": {
+ "acc": true
+ },
+ "mmlu_professional_accounting": {
+ "acc": true
+ },
+ "mmlu_professional_law": {
+ "acc": true
+ },
+ "mmlu_professional_medicine": {
+ "acc": true
+ },
+ "mmlu_professional_psychology": {
+ "acc": true
+ },
+ "mmlu_public_relations": {
+ "acc": true
+ },
+ "mmlu_security_studies": {
+ "acc": true
+ },
+ "mmlu_social_sciences": {
+ "acc": true
+ },
+ "mmlu_sociology": {
+ "acc": true
+ },
+ "mmlu_stem": {
+ "acc": true
+ },
+ "mmlu_us_foreign_policy": {
+ "acc": true
+ },
+ "mmlu_virology": {
+ "acc": true
+ },
+ "mmlu_world_religions": {
+ "acc": true
+ },
+ "truthfulqa": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc": true
+ },
+ "truthfulqa_gen": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true
+ },
+ "truthfulqa_mc1": {
+ "acc": true
+ },
+ "truthfulqa_mc2": {
+ "acc": true
+ },
+ "winogrande": {
+ "acc": true
+ }
+ },
+ "n-samples": {
+ "gsm8k": {
+ "original": 1319,
+ "effective": 1319
+ },
+ "winogrande": {
+ "original": 1267,
+ "effective": 1267
+ },
+ "mmlu_high_school_european_history": {
+ "original": 165,
+ "effective": 165
+ },
+ "mmlu_formal_logic": {
+ "original": 126,
+ "effective": 126
+ },
+ "mmlu_moral_scenarios": {
+ "original": 895,
+ "effective": 895
+ },
+ "mmlu_moral_disputes": {
+ "original": 346,
+ "effective": 346
+ },
+ "mmlu_world_religions": {
+ "original": 171,
+ "effective": 171
+ },
+ "mmlu_high_school_world_history": {
+ "original": 237,
+ "effective": 237
+ },
+ "mmlu_logical_fallacies": {
+ "original": 163,
+ "effective": 163
+ },
+ "mmlu_international_law": {
+ "original": 121,
+ "effective": 121
+ },
+ "mmlu_philosophy": {
+ "original": 311,
+ "effective": 311
+ },
+ "mmlu_professional_law": {
+ "original": 1534,
+ "effective": 1534
+ },
+ "mmlu_high_school_us_history": {
+ "original": 204,
+ "effective": 204
+ },
+ "mmlu_prehistory": {
+ "original": 324,
+ "effective": 324
+ },
+ "mmlu_jurisprudence": {
+ "original": 108,
+ "effective": 108
+ },
+ "mmlu_high_school_psychology": {
+ "original": 545,
+ "effective": 545
+ },
+ "mmlu_sociology": {
+ "original": 201,
+ "effective": 201
+ },
+ "mmlu_high_school_government_and_politics": {
+ "original": 193,
+ "effective": 193
+ },
+ "mmlu_public_relations": {
+ "original": 110,
+ "effective": 110
+ },
+ "mmlu_high_school_macroeconomics": {
+ "original": 390,
+ "effective": 390
+ },
+ "mmlu_high_school_geography": {
+ "original": 198,
+ "effective": 198
+ },
+ "mmlu_high_school_microeconomics": {
+ "original": 238,
+ "effective": 238
+ },
+ "mmlu_security_studies": {
+ "original": 245,
+ "effective": 245
+ },
+ "mmlu_us_foreign_policy": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_professional_psychology": {
+ "original": 612,
+ "effective": 612
+ },
+ "mmlu_human_sexuality": {
+ "original": 131,
+ "effective": 131
+ },
+ "mmlu_econometrics": {
+ "original": 114,
+ "effective": 114
+ },
+ "mmlu_professional_medicine": {
+ "original": 272,
+ "effective": 272
+ },
+ "mmlu_professional_accounting": {
+ "original": 282,
+ "effective": 282
+ },
+ "mmlu_management": {
+ "original": 103,
+ "effective": 103
+ },
+ "mmlu_global_facts": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_medicine": {
+ "original": 173,
+ "effective": 173
+ },
+ "mmlu_business_ethics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_nutrition": {
+ "original": 306,
+ "effective": 306
+ },
+ "mmlu_medical_genetics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_virology": {
+ "original": 166,
+ "effective": 166
+ },
+ "mmlu_human_aging": {
+ "original": 223,
+ "effective": 223
+ },
+ "mmlu_clinical_knowledge": {
+ "original": 265,
+ "effective": 265
+ },
+ "mmlu_miscellaneous": {
+ "original": 783,
+ "effective": 783
+ },
+ "mmlu_marketing": {
+ "original": 234,
+ "effective": 234
+ },
+ "mmlu_high_school_chemistry": {
+ "original": 203,
+ "effective": 203
+ },
+ "mmlu_college_physics": {
+ "original": 102,
+ "effective": 102
+ },
+ "mmlu_college_mathematics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_astronomy": {
+ "original": 152,
+ "effective": 152
+ },
+ "mmlu_high_school_physics": {
+ "original": 151,
+ "effective": 151
+ },
+ "mmlu_computer_security": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_elementary_mathematics": {
+ "original": 378,
+ "effective": 378
+ },
+ "mmlu_electrical_engineering": {
+ "original": 145,
+ "effective": 145
+ },
+ "mmlu_college_biology": {
+ "original": 144,
+ "effective": 144
+ },
+ "mmlu_machine_learning": {
+ "original": 112,
+ "effective": 112
+ },
+ "mmlu_high_school_biology": {
+ "original": 310,
+ "effective": 310
+ },
+ "mmlu_high_school_mathematics": {
+ "original": 270,
+ "effective": 270
+ },
+ "mmlu_anatomy": {
+ "original": 135,
+ "effective": 135
+ },
+ "mmlu_high_school_statistics": {
+ "original": 216,
+ "effective": 216
+ },
+ "mmlu_college_chemistry": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_conceptual_physics": {
+ "original": 235,
+ "effective": 235
+ },
+ "mmlu_high_school_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_abstract_algebra": {
+ "original": 100,
+ "effective": 100
+ },
+ "truthfulqa_gen": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc1": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc2": {
+ "original": 817,
+ "effective": 817
+ },
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "arc_challenge": {
+ "original": 1172,
+ "effective": 1172
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Smart-Lemon-Cookie-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "24a18cbcb94c55811593f89026c6fe51331f4a57",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 2
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719550043.4933457,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<unk>",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "</s>",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Smart-Lemon-Cookie-7B",
+ "model_name_sanitized": "FallenMerick__Smart-Lemon-Cookie-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 102426.774034499,
+ "end_time": 138957.776397903,
+ "total_evaluation_time_seconds": "36531.00236340401"
+}
\ No newline at end of file
diff --git a/FallenMerick__Smart-Lemon-Cookie-7B/results_2024-06-28T14-56-07.716918.json b/FallenMerick__Smart-Lemon-Cookie-7B/results_2024-06-28T14-56-07.716918.json
new file mode 100644
index 0000000000000000000000000000000000000000..9a2f104e8cba6d24182ceadad21a8f788548c78a
--- /dev/null
+++ b/FallenMerick__Smart-Lemon-Cookie-7B/results_2024-06-28T14-56-07.716918.json
@@ -0,0 +1,4035 @@
+{
+ "results": {
+ "Open LLM Leaderboard": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "exact_match,flexible-extract": 0.6573161485974223,
+ "exact_match_stderr,flexible-extract": 0.013073030230827912,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "exact_match,strict-match": 0.6497346474601972,
+ "exact_match_stderr,strict-match": 0.013140409455571269,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "acc_norm,none": 0.8349384697699305,
+ "acc_norm_stderr,none": 0.0034656728893589055,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "acc,none": 0.6524833304898358,
+ "acc_stderr,none": 0.002755144452920947,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": "Open LLM Leaderboard"
+ },
+ "arc_challenge": {
+ "acc,none": 0.6390784982935154,
+ "acc_stderr,none": 0.014034761386175458,
+ "acc_norm,none": 0.6706484641638225,
+ "acc_norm_stderr,none": 0.013734057652635473,
+ "alias": " - arc_challenge"
+ },
+ "gsm8k": {
+ "exact_match,strict-match": 0.6497346474601972,
+ "exact_match_stderr,strict-match": 0.013140409455571267,
+ "exact_match,flexible-extract": 0.6573161485974223,
+ "exact_match_stderr,flexible-extract": 0.013073030230827912,
+ "alias": " - gsm8k"
+ },
+ "hellaswag": {
+ "acc,none": 0.6714797849034057,
+ "acc_stderr,none": 0.00468715199479105,
+ "acc_norm,none": 0.8541127265484963,
+ "acc_norm_stderr,none": 0.0035227174995242872,
+ "alias": " - hellaswag"
+ },
+ "mmlu": {
+ "acc,none": 0.6370175188719556,
+ "acc_stderr,none": 0.0038187579064371084,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.593836344314559,
+ "acc_stderr,none": 0.006701956606258013
+ },
+ "mmlu_formal_logic": {
+ "alias": " - formal_logic",
+ "acc,none": 0.4523809523809524,
+ "acc_stderr,none": 0.044518079590553275
+ },
+ "mmlu_high_school_european_history": {
+ "alias": " - high_school_european_history",
+ "acc,none": 0.7757575757575758,
+ "acc_stderr,none": 0.03256866661681102
+ },
+ "mmlu_high_school_us_history": {
+ "alias": " - high_school_us_history",
+ "acc,none": 0.8480392156862745,
+ "acc_stderr,none": 0.025195658428931792
+ },
+ "mmlu_high_school_world_history": {
+ "alias": " - high_school_world_history",
+ "acc,none": 0.8227848101265823,
+ "acc_stderr,none": 0.024856364184503238
+ },
+ "mmlu_international_law": {
+ "alias": " - international_law",
+ "acc,none": 0.8181818181818182,
+ "acc_stderr,none": 0.03520893951097654
+ },
+ "mmlu_jurisprudence": {
+ "alias": " - jurisprudence",
+ "acc,none": 0.8240740740740741,
+ "acc_stderr,none": 0.036809181416738807
+ },
+ "mmlu_logical_fallacies": {
+ "alias": " - logical_fallacies",
+ "acc,none": 0.7852760736196319,
+ "acc_stderr,none": 0.03226219377286774
+ },
+ "mmlu_moral_disputes": {
+ "alias": " - moral_disputes",
+ "acc,none": 0.7225433526011561,
+ "acc_stderr,none": 0.024105712607754307
+ },
+ "mmlu_moral_scenarios": {
+ "alias": " - moral_scenarios",
+ "acc,none": 0.37206703910614525,
+ "acc_stderr,none": 0.016165847583563302
+ },
+ "mmlu_philosophy": {
+ "alias": " - philosophy",
+ "acc,none": 0.7170418006430869,
+ "acc_stderr,none": 0.02558306248998483
+ },
+ "mmlu_prehistory": {
+ "alias": " - prehistory",
+ "acc,none": 0.7345679012345679,
+ "acc_stderr,none": 0.02456922360046085
+ },
+ "mmlu_professional_law": {
+ "alias": " - professional_law",
+ "acc,none": 0.47979139504563234,
+ "acc_stderr,none": 0.01275980142776756
+ },
+ "mmlu_world_religions": {
+ "alias": " - world_religions",
+ "acc,none": 0.847953216374269,
+ "acc_stderr,none": 0.02753912288906145
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.702928870292887,
+ "acc_stderr,none": 0.007868349963426575
+ },
+ "mmlu_business_ethics": {
+ "alias": " - business_ethics",
+ "acc,none": 0.57,
+ "acc_stderr,none": 0.049756985195624284
+ },
+ "mmlu_clinical_knowledge": {
+ "alias": " - clinical_knowledge",
+ "acc,none": 0.6943396226415094,
+ "acc_stderr,none": 0.028353298073322666
+ },
+ "mmlu_college_medicine": {
+ "alias": " - college_medicine",
+ "acc,none": 0.6763005780346821,
+ "acc_stderr,none": 0.0356760379963917
+ },
+ "mmlu_global_facts": {
+ "alias": " - global_facts",
+ "acc,none": 0.32,
+ "acc_stderr,none": 0.046882617226215034
+ },
+ "mmlu_human_aging": {
+ "alias": " - human_aging",
+ "acc,none": 0.695067264573991,
+ "acc_stderr,none": 0.030898610882477518
+ },
+ "mmlu_management": {
+ "alias": " - management",
+ "acc,none": 0.7864077669902912,
+ "acc_stderr,none": 0.04058042015646034
+ },
+ "mmlu_marketing": {
+ "alias": " - marketing",
+ "acc,none": 0.8675213675213675,
+ "acc_stderr,none": 0.022209309073165616
+ },
+ "mmlu_medical_genetics": {
+ "alias": " - medical_genetics",
+ "acc,none": 0.7,
+ "acc_stderr,none": 0.046056618647183814
+ },
+ "mmlu_miscellaneous": {
+ "alias": " - miscellaneous",
+ "acc,none": 0.8275862068965517,
+ "acc_stderr,none": 0.013507943909371802
+ },
+ "mmlu_nutrition": {
+ "alias": " - nutrition",
+ "acc,none": 0.7320261437908496,
+ "acc_stderr,none": 0.025360603796242553
+ },
+ "mmlu_professional_accounting": {
+ "alias": " - professional_accounting",
+ "acc,none": 0.48936170212765956,
+ "acc_stderr,none": 0.029820747191422466
+ },
+ "mmlu_professional_medicine": {
+ "alias": " - professional_medicine",
+ "acc,none": 0.6838235294117647,
+ "acc_stderr,none": 0.028245687391462913
+ },
+ "mmlu_virology": {
+ "alias": " - virology",
+ "acc,none": 0.536144578313253,
+ "acc_stderr,none": 0.03882310850890593
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7409814754631134,
+ "acc_stderr,none": 0.0077233871931608284
+ },
+ "mmlu_econometrics": {
+ "alias": " - econometrics",
+ "acc,none": 0.5087719298245614,
+ "acc_stderr,none": 0.04702880432049615
+ },
+ "mmlu_high_school_geography": {
+ "alias": " - high_school_geography",
+ "acc,none": 0.803030303030303,
+ "acc_stderr,none": 0.028335609732463362
+ },
+ "mmlu_high_school_government_and_politics": {
+ "alias": " - high_school_government_and_politics",
+ "acc,none": 0.8808290155440415,
+ "acc_stderr,none": 0.023381935348121427
+ },
+ "mmlu_high_school_macroeconomics": {
+ "alias": " - high_school_macroeconomics",
+ "acc,none": 0.6717948717948717,
+ "acc_stderr,none": 0.023807633198657266
+ },
+ "mmlu_high_school_microeconomics": {
+ "alias": " - high_school_microeconomics",
+ "acc,none": 0.7100840336134454,
+ "acc_stderr,none": 0.029472485833136094
+ },
+ "mmlu_high_school_psychology": {
+ "alias": " - high_school_psychology",
+ "acc,none": 0.8366972477064221,
+ "acc_stderr,none": 0.01584825580650152
+ },
+ "mmlu_human_sexuality": {
+ "alias": " - human_sexuality",
+ "acc,none": 0.7557251908396947,
+ "acc_stderr,none": 0.037683359597287434
+ },
+ "mmlu_professional_psychology": {
+ "alias": " - professional_psychology",
+ "acc,none": 0.6454248366013072,
+ "acc_stderr,none": 0.019353360547553693
+ },
+ "mmlu_public_relations": {
+ "alias": " - public_relations",
+ "acc,none": 0.6727272727272727,
+ "acc_stderr,none": 0.0449429086625209
+ },
+ "mmlu_security_studies": {
+ "alias": " - security_studies",
+ "acc,none": 0.7346938775510204,
+ "acc_stderr,none": 0.028263889943784603
+ },
+ "mmlu_sociology": {
+ "alias": " - sociology",
+ "acc,none": 0.8507462686567164,
+ "acc_stderr,none": 0.0251969298748271
+ },
+ "mmlu_us_foreign_policy": {
+ "alias": " - us_foreign_policy",
+ "acc,none": 0.87,
+ "acc_stderr,none": 0.03379976689896308
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5350459879479861,
+ "acc_stderr,none": 0.008502490762016599
+ },
+ "mmlu_abstract_algebra": {
+ "alias": " - abstract_algebra",
+ "acc,none": 0.35,
+ "acc_stderr,none": 0.0479372485441102
+ },
+ "mmlu_anatomy": {
+ "alias": " - anatomy",
+ "acc,none": 0.6222222222222222,
+ "acc_stderr,none": 0.04188307537595853
+ },
+ "mmlu_astronomy": {
+ "alias": " - astronomy",
+ "acc,none": 0.6578947368421053,
+ "acc_stderr,none": 0.038607315993160904
+ },
+ "mmlu_college_biology": {
+ "alias": " - college_biology",
+ "acc,none": 0.7638888888888888,
+ "acc_stderr,none": 0.03551446610810826
+ },
+ "mmlu_college_chemistry": {
+ "alias": " - college_chemistry",
+ "acc,none": 0.47,
+ "acc_stderr,none": 0.05016135580465919
+ },
+ "mmlu_college_computer_science": {
+ "alias": " - college_computer_science",
+ "acc,none": 0.54,
+ "acc_stderr,none": 0.05009082659620332
+ },
+ "mmlu_college_mathematics": {
+ "alias": " - college_mathematics",
+ "acc,none": 0.4,
+ "acc_stderr,none": 0.049236596391733084
+ },
+ "mmlu_college_physics": {
+ "alias": " - college_physics",
+ "acc,none": 0.4117647058823529,
+ "acc_stderr,none": 0.04897104952726366
+ },
+ "mmlu_computer_security": {
+ "alias": " - computer_security",
+ "acc,none": 0.8,
+ "acc_stderr,none": 0.04020151261036846
+ },
+ "mmlu_conceptual_physics": {
+ "alias": " - conceptual_physics",
+ "acc,none": 0.5787234042553191,
+ "acc_stderr,none": 0.03227834510146268
+ },
+ "mmlu_electrical_engineering": {
+ "alias": " - electrical_engineering",
+ "acc,none": 0.5793103448275863,
+ "acc_stderr,none": 0.04113914981189261
+ },
+ "mmlu_elementary_mathematics": {
+ "alias": " - elementary_mathematics",
+ "acc,none": 0.3968253968253968,
+ "acc_stderr,none": 0.025197101074246483
+ },
+ "mmlu_high_school_biology": {
+ "alias": " - high_school_biology",
+ "acc,none": 0.7774193548387097,
+ "acc_stderr,none": 0.023664216671642525
+ },
+ "mmlu_high_school_chemistry": {
+ "alias": " - high_school_chemistry",
+ "acc,none": 0.5073891625615764,
+ "acc_stderr,none": 0.0351760354036101
+ },
+ "mmlu_high_school_computer_science": {
+ "alias": " - high_school_computer_science",
+ "acc,none": 0.72,
+ "acc_stderr,none": 0.045126085985421276
+ },
+ "mmlu_high_school_mathematics": {
+ "alias": " - high_school_mathematics",
+ "acc,none": 0.34814814814814815,
+ "acc_stderr,none": 0.029045600290616258
+ },
+ "mmlu_high_school_physics": {
+ "alias": " - high_school_physics",
+ "acc,none": 0.3509933774834437,
+ "acc_stderr,none": 0.03896981964257375
+ },
+ "mmlu_high_school_statistics": {
+ "alias": " - high_school_statistics",
+ "acc,none": 0.5046296296296297,
+ "acc_stderr,none": 0.03409825519163572
+ },
+ "mmlu_machine_learning": {
+ "alias": " - machine_learning",
+ "acc,none": 0.4732142857142857,
+ "acc_stderr,none": 0.047389751192741546
+ },
+ "truthfulqa": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "acc,none": 0.5163944376892423,
+ "acc_stderr,none": 0.011629460414206856,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": " - truthfulqa"
+ },
+ "truthfulqa_gen": {
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374931,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799713,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": " - truthfulqa_gen"
+ },
+ "truthfulqa_mc1": {
+ "acc,none": 0.4320685434516524,
+ "acc_stderr,none": 0.01734120239498826,
+ "alias": " - truthfulqa_mc1"
+ },
+ "truthfulqa_mc2": {
+ "acc,none": 0.6007203319268323,
+ "acc_stderr,none": 0.015500325725560432,
+ "alias": " - truthfulqa_mc2"
+ },
+ "winogrande": {
+ "acc,none": 0.7734806629834254,
+ "acc_stderr,none": 0.01176414905469832,
+ "alias": " - winogrande"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.12395548919517,
+ "eqbench_stderr,none": 2.1553076487761045,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "groups": {
+ "Open LLM Leaderboard": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "exact_match,flexible-extract": 0.6573161485974223,
+ "exact_match_stderr,flexible-extract": 0.013073030230827912,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "exact_match,strict-match": 0.6497346474601972,
+ "exact_match_stderr,strict-match": 0.013140409455571269,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "acc_norm,none": 0.8349384697699305,
+ "acc_norm_stderr,none": 0.0034656728893589055,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "acc,none": 0.6524833304898358,
+ "acc_stderr,none": 0.002755144452920947,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": "Open LLM Leaderboard"
+ },
+ "mmlu": {
+ "acc,none": 0.6370175188719556,
+ "acc_stderr,none": 0.0038187579064371084,
+ "alias": " - mmlu"
+ },
+ "mmlu_humanities": {
+ "alias": " - humanities",
+ "acc,none": 0.593836344314559,
+ "acc_stderr,none": 0.006701956606258013
+ },
+ "mmlu_other": {
+ "alias": " - other",
+ "acc,none": 0.702928870292887,
+ "acc_stderr,none": 0.007868349963426575
+ },
+ "mmlu_social_sciences": {
+ "alias": " - social_sciences",
+ "acc,none": 0.7409814754631134,
+ "acc_stderr,none": 0.0077233871931608284
+ },
+ "mmlu_stem": {
+ "alias": " - stem",
+ "acc,none": 0.5350459879479861,
+ "acc_stderr,none": 0.008502490762016599
+ },
+ "truthfulqa": {
+ "bleu_acc,none": 0.48592411260709917,
+ "bleu_acc_stderr,none": 0.01749656371704277,
+ "rouge1_diff,none": 2.0497625977348237,
+ "rouge1_diff_stderr,none": 0.8467979858374932,
+ "rouge1_acc,none": 0.5067319461444308,
+ "rouge1_acc_stderr,none": 0.017501914492655368,
+ "rouge2_diff,none": 1.3428910448034004,
+ "rouge2_diff_stderr,none": 0.9648647176231531,
+ "rougeL_acc,none": 0.4981640146878825,
+ "rougeL_acc_stderr,none": 0.017503383046877072,
+ "bleu_max,none": 20.940311645567302,
+ "bleu_max_stderr,none": 0.7173140178916005,
+ "rouge2_acc,none": 0.4320685434516524,
+ "rouge2_acc_stderr,none": 0.01734120239498827,
+ "rouge1_max,none": 46.177982007870185,
+ "rouge1_max_stderr,none": 0.8131363401138358,
+ "rouge2_max,none": 32.18242146861712,
+ "rouge2_max_stderr,none": 0.9183747194799712,
+ "rougeL_max,none": 42.81466100258748,
+ "rougeL_max_stderr,none": 0.8340477381054907,
+ "acc,none": 0.5163944376892423,
+ "acc_stderr,none": 0.011629460414206856,
+ "bleu_diff,none": 1.3935266971798104,
+ "bleu_diff_stderr,none": 0.6400373603862807,
+ "rougeL_diff,none": 1.718464887616239,
+ "rougeL_diff_stderr,none": 0.8631878996298543,
+ "alias": " - truthfulqa"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "truthfulqa": [
+ "truthfulqa_gen",
+ "truthfulqa_mc1",
+ "truthfulqa_mc2"
+ ],
+ "mmlu_stem": [
+ "mmlu_high_school_chemistry",
+ "mmlu_college_physics",
+ "mmlu_college_mathematics",
+ "mmlu_astronomy",
+ "mmlu_high_school_physics",
+ "mmlu_computer_security",
+ "mmlu_elementary_mathematics",
+ "mmlu_electrical_engineering",
+ "mmlu_college_biology",
+ "mmlu_machine_learning",
+ "mmlu_high_school_biology",
+ "mmlu_high_school_mathematics",
+ "mmlu_anatomy",
+ "mmlu_high_school_statistics",
+ "mmlu_college_chemistry",
+ "mmlu_conceptual_physics",
+ "mmlu_high_school_computer_science",
+ "mmlu_college_computer_science",
+ "mmlu_abstract_algebra"
+ ],
+ "mmlu_other": [
+ "mmlu_professional_medicine",
+ "mmlu_professional_accounting",
+ "mmlu_management",
+ "mmlu_global_facts",
+ "mmlu_college_medicine",
+ "mmlu_business_ethics",
+ "mmlu_nutrition",
+ "mmlu_medical_genetics",
+ "mmlu_virology",
+ "mmlu_human_aging",
+ "mmlu_clinical_knowledge",
+ "mmlu_miscellaneous",
+ "mmlu_marketing"
+ ],
+ "mmlu_social_sciences": [
+ "mmlu_high_school_psychology",
+ "mmlu_sociology",
+ "mmlu_high_school_government_and_politics",
+ "mmlu_public_relations",
+ "mmlu_high_school_macroeconomics",
+ "mmlu_high_school_geography",
+ "mmlu_high_school_microeconomics",
+ "mmlu_security_studies",
+ "mmlu_us_foreign_policy",
+ "mmlu_professional_psychology",
+ "mmlu_human_sexuality",
+ "mmlu_econometrics"
+ ],
+ "mmlu_humanities": [
+ "mmlu_high_school_european_history",
+ "mmlu_formal_logic",
+ "mmlu_moral_scenarios",
+ "mmlu_moral_disputes",
+ "mmlu_world_religions",
+ "mmlu_high_school_world_history",
+ "mmlu_logical_fallacies",
+ "mmlu_international_law",
+ "mmlu_philosophy",
+ "mmlu_professional_law",
+ "mmlu_high_school_us_history",
+ "mmlu_prehistory",
+ "mmlu_jurisprudence"
+ ],
+ "mmlu": [
+ "mmlu_humanities",
+ "mmlu_social_sciences",
+ "mmlu_other",
+ "mmlu_stem"
+ ],
+ "Open LLM Leaderboard": [
+ "gsm8k",
+ "winogrande",
+ "mmlu",
+ "truthfulqa",
+ "hellaswag",
+ "arc_challenge"
+ ]
+ },
+ "configs": {
+ "arc_challenge": {
+ "task": "arc_challenge",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "allenai/ai2_arc",
+ "dataset_name": "ARC-Challenge",
+ "training_split": "train",
+ "validation_split": "validation",
+ "test_split": "test",
+ "fewshot_split": "validation",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{choices.label.index(answerKey)}}",
+ "doc_to_choice": "{{choices.text}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 25,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "Question: {{question}}\nAnswer:",
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "gsm8k": {
+ "task": "gsm8k",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "gsm8k",
+ "dataset_name": "main",
+ "training_split": "train",
+ "test_split": "test",
+ "fewshot_split": "train",
+ "doc_to_text": "Question: {{question}}\nAnswer:",
+ "doc_to_target": "{{answer}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "exact_match",
+ "aggregation": "mean",
+ "higher_is_better": true,
+ "ignore_case": true,
+ "ignore_punctuation": false,
+ "regexes_to_ignore": [
+ ",",
+ "\\$",
+ "(?s).*#### ",
+ "\\.$"
+ ]
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "Question:",
+ "",
+ "<|im_end|>"
+ ],
+ "do_sample": false,
+ "temperature": 0.0
+ },
+ "repeats": 1,
+ "filter_list": [
+ {
+ "name": "strict-match",
+ "filter": [
+ {
+ "function": "regex",
+ "regex_pattern": "#### (\\-?[0-9\\.\\,]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ },
+ {
+ "name": "flexible-extract",
+ "filter": [
+ {
+ "function": "regex",
+ "group_select": -1,
+ "regex_pattern": "(-?[$0-9.,]{2,})|(-?[0-9]+)"
+ },
+ {
+ "function": "take_first"
+ }
+ ]
+ }
+ ],
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 10,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ },
+ "mmlu_abstract_algebra": {
+ "task": "mmlu_abstract_algebra",
+ "task_alias": "abstract_algebra",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "abstract_algebra",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about abstract algebra.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_anatomy": {
+ "task": "mmlu_anatomy",
+ "task_alias": "anatomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "anatomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about anatomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_astronomy": {
+ "task": "mmlu_astronomy",
+ "task_alias": "astronomy",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "astronomy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about astronomy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_business_ethics": {
+ "task": "mmlu_business_ethics",
+ "task_alias": "business_ethics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "business_ethics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about business ethics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_clinical_knowledge": {
+ "task": "mmlu_clinical_knowledge",
+ "task_alias": "clinical_knowledge",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "clinical_knowledge",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about clinical knowledge.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_biology": {
+ "task": "mmlu_college_biology",
+ "task_alias": "college_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_chemistry": {
+ "task": "mmlu_college_chemistry",
+ "task_alias": "college_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_computer_science": {
+ "task": "mmlu_college_computer_science",
+ "task_alias": "college_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_mathematics": {
+ "task": "mmlu_college_mathematics",
+ "task_alias": "college_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_medicine": {
+ "task": "mmlu_college_medicine",
+ "task_alias": "college_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_college_physics": {
+ "task": "mmlu_college_physics",
+ "task_alias": "college_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "college_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about college physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_computer_security": {
+ "task": "mmlu_computer_security",
+ "task_alias": "computer_security",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "computer_security",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about computer security.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_conceptual_physics": {
+ "task": "mmlu_conceptual_physics",
+ "task_alias": "conceptual_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "conceptual_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about conceptual physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_econometrics": {
+ "task": "mmlu_econometrics",
+ "task_alias": "econometrics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "econometrics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about econometrics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_electrical_engineering": {
+ "task": "mmlu_electrical_engineering",
+ "task_alias": "electrical_engineering",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "electrical_engineering",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about electrical engineering.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_elementary_mathematics": {
+ "task": "mmlu_elementary_mathematics",
+ "task_alias": "elementary_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "elementary_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about elementary mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_formal_logic": {
+ "task": "mmlu_formal_logic",
+ "task_alias": "formal_logic",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "formal_logic",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about formal logic.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_global_facts": {
+ "task": "mmlu_global_facts",
+ "task_alias": "global_facts",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "global_facts",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about global facts.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_biology": {
+ "task": "mmlu_high_school_biology",
+ "task_alias": "high_school_biology",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_biology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school biology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_chemistry": {
+ "task": "mmlu_high_school_chemistry",
+ "task_alias": "high_school_chemistry",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_chemistry",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school chemistry.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_computer_science": {
+ "task": "mmlu_high_school_computer_science",
+ "task_alias": "high_school_computer_science",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_computer_science",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school computer science.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_european_history": {
+ "task": "mmlu_high_school_european_history",
+ "task_alias": "high_school_european_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_european_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school european history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_geography": {
+ "task": "mmlu_high_school_geography",
+ "task_alias": "high_school_geography",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_geography",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school geography.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_government_and_politics": {
+ "task": "mmlu_high_school_government_and_politics",
+ "task_alias": "high_school_government_and_politics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_government_and_politics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school government and politics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_macroeconomics": {
+ "task": "mmlu_high_school_macroeconomics",
+ "task_alias": "high_school_macroeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_macroeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school macroeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_mathematics": {
+ "task": "mmlu_high_school_mathematics",
+ "task_alias": "high_school_mathematics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_mathematics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school mathematics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_microeconomics": {
+ "task": "mmlu_high_school_microeconomics",
+ "task_alias": "high_school_microeconomics",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_microeconomics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school microeconomics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_physics": {
+ "task": "mmlu_high_school_physics",
+ "task_alias": "high_school_physics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_physics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school physics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_psychology": {
+ "task": "mmlu_high_school_psychology",
+ "task_alias": "high_school_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_statistics": {
+ "task": "mmlu_high_school_statistics",
+ "task_alias": "high_school_statistics",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_statistics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school statistics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_us_history": {
+ "task": "mmlu_high_school_us_history",
+ "task_alias": "high_school_us_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_us_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school us history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_high_school_world_history": {
+ "task": "mmlu_high_school_world_history",
+ "task_alias": "high_school_world_history",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "high_school_world_history",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about high school world history.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_aging": {
+ "task": "mmlu_human_aging",
+ "task_alias": "human_aging",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_aging",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human aging.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_human_sexuality": {
+ "task": "mmlu_human_sexuality",
+ "task_alias": "human_sexuality",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "human_sexuality",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about human sexuality.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_international_law": {
+ "task": "mmlu_international_law",
+ "task_alias": "international_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "international_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about international law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_jurisprudence": {
+ "task": "mmlu_jurisprudence",
+ "task_alias": "jurisprudence",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "jurisprudence",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_logical_fallacies": {
+ "task": "mmlu_logical_fallacies",
+ "task_alias": "logical_fallacies",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "logical_fallacies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about logical fallacies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_machine_learning": {
+ "task": "mmlu_machine_learning",
+ "task_alias": "machine_learning",
+ "group": "mmlu_stem",
+ "group_alias": "stem",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "machine_learning",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about machine learning.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_management": {
+ "task": "mmlu_management",
+ "task_alias": "management",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "management",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about management.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_marketing": {
+ "task": "mmlu_marketing",
+ "task_alias": "marketing",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "marketing",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about marketing.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_medical_genetics": {
+ "task": "mmlu_medical_genetics",
+ "task_alias": "medical_genetics",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "medical_genetics",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about medical genetics.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_miscellaneous": {
+ "task": "mmlu_miscellaneous",
+ "task_alias": "miscellaneous",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "miscellaneous",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about miscellaneous.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_disputes": {
+ "task": "mmlu_moral_disputes",
+ "task_alias": "moral_disputes",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_disputes",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral disputes.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_moral_scenarios": {
+ "task": "mmlu_moral_scenarios",
+ "task_alias": "moral_scenarios",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "moral_scenarios",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about moral scenarios.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_nutrition": {
+ "task": "mmlu_nutrition",
+ "task_alias": "nutrition",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "nutrition",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about nutrition.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_philosophy": {
+ "task": "mmlu_philosophy",
+ "task_alias": "philosophy",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "philosophy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about philosophy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_prehistory": {
+ "task": "mmlu_prehistory",
+ "task_alias": "prehistory",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "prehistory",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about prehistory.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_accounting": {
+ "task": "mmlu_professional_accounting",
+ "task_alias": "professional_accounting",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_accounting",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional accounting.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_law": {
+ "task": "mmlu_professional_law",
+ "task_alias": "professional_law",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_law",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional law.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_medicine": {
+ "task": "mmlu_professional_medicine",
+ "task_alias": "professional_medicine",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_medicine",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional medicine.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_professional_psychology": {
+ "task": "mmlu_professional_psychology",
+ "task_alias": "professional_psychology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "professional_psychology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about professional psychology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_public_relations": {
+ "task": "mmlu_public_relations",
+ "task_alias": "public_relations",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "public_relations",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about public relations.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_security_studies": {
+ "task": "mmlu_security_studies",
+ "task_alias": "security_studies",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "security_studies",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about security studies.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_sociology": {
+ "task": "mmlu_sociology",
+ "task_alias": "sociology",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "sociology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about sociology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_us_foreign_policy": {
+ "task": "mmlu_us_foreign_policy",
+ "task_alias": "us_foreign_policy",
+ "group": "mmlu_social_sciences",
+ "group_alias": "social_sciences",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "us_foreign_policy",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about us foreign policy.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_virology": {
+ "task": "mmlu_virology",
+ "task_alias": "virology",
+ "group": "mmlu_other",
+ "group_alias": "other",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "virology",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about virology.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "mmlu_world_religions": {
+ "task": "mmlu_world_religions",
+ "task_alias": "world_religions",
+ "group": "mmlu_humanities",
+ "group_alias": "humanities",
+ "dataset_path": "hails/mmlu_no_train",
+ "dataset_name": "world_religions",
+ "dataset_kwargs": {
+ "trust_remote_code": true
+ },
+ "test_split": "test",
+ "fewshot_split": "dev",
+ "doc_to_text": "{{question.strip()}}\nA. {{choices[0]}}\nB. {{choices[1]}}\nC. {{choices[2]}}\nD. {{choices[3]}}\nAnswer:",
+ "doc_to_target": "answer",
+ "doc_to_choice": [
+ "A",
+ "B",
+ "C",
+ "D"
+ ],
+ "description": "The following are multiple choice questions (with answers) about world religions.\n\n",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "fewshot_config": {
+ "sampler": "first_n"
+ },
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 0.0
+ }
+ },
+ "truthfulqa_gen": {
+ "task": "truthfulqa_gen",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "generation",
+ "validation_split": "validation",
+ "process_docs": "def process_docs_gen(dataset: datasets.Dataset) -> datasets.Dataset:\n return dataset.map(preprocess_function)\n",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question}}",
+ "doc_to_target": " ",
+ "process_results": "def process_results_gen(doc, results):\n completion = results[0]\n true_refs, false_refs = doc[\"correct_answers\"], doc[\"incorrect_answers\"]\n all_refs = true_refs + false_refs\n\n # Process the sentence-level BLEURT, BLEU, and ROUGE for similarity measures.\n\n # # BLEURT\n # bleurt_scores_true = self.bleurt.compute(\n # predictions=[completion] * len(true_refs), references=true_refs\n # )[\"scores\"]\n # bleurt_scores_false = self.bleurt.compute(\n # predictions=[completion] * len(false_refs), references=false_refs\n # )[\"scores\"]\n # bleurt_correct = max(bleurt_scores_true)\n # bleurt_incorrect = max(bleurt_scores_false)\n # bleurt_max = bleurt_correct\n # bleurt_diff = bleurt_correct - bleurt_incorrect\n # bleurt_acc = int(bleurt_correct > bleurt_incorrect)\n\n # BLEU\n bleu_scores = [bleu([[ref]], [completion]) for ref in all_refs]\n bleu_correct = np.nanmax(bleu_scores[: len(true_refs)])\n bleu_incorrect = np.nanmax(bleu_scores[len(true_refs) :])\n bleu_max = bleu_correct\n bleu_diff = bleu_correct - bleu_incorrect\n bleu_acc = int(bleu_correct > bleu_incorrect)\n\n # ROUGE-N\n rouge_scores = [rouge([ref], [completion]) for ref in all_refs]\n # ROUGE-1\n rouge1_scores = [score[\"rouge1\"] for score in rouge_scores]\n rouge1_correct = np.nanmax(rouge1_scores[: len(true_refs)])\n rouge1_incorrect = np.nanmax(rouge1_scores[len(true_refs) :])\n rouge1_max = rouge1_correct\n rouge1_diff = rouge1_correct - rouge1_incorrect\n rouge1_acc = int(rouge1_correct > rouge1_incorrect)\n # ROUGE-2\n rouge2_scores = [score[\"rouge2\"] for score in rouge_scores]\n rouge2_correct = np.nanmax(rouge2_scores[: len(true_refs)])\n rouge2_incorrect = np.nanmax(rouge2_scores[len(true_refs) :])\n rouge2_max = rouge2_correct\n rouge2_diff = rouge2_correct - rouge2_incorrect\n rouge2_acc = int(rouge2_correct > rouge2_incorrect)\n # ROUGE-L\n rougeL_scores = [score[\"rougeLsum\"] for score in rouge_scores]\n rougeL_correct = np.nanmax(rougeL_scores[: len(true_refs)])\n rougeL_incorrect = np.nanmax(rougeL_scores[len(true_refs) :])\n rougeL_max = rougeL_correct\n rougeL_diff = rougeL_correct - rougeL_incorrect\n rougeL_acc = int(rougeL_correct > rougeL_incorrect)\n\n return {\n # \"bleurt_max\": bleurt_max,\n # \"bleurt_acc\": bleurt_acc,\n # \"bleurt_diff\": bleurt_diff,\n \"bleu_max\": bleu_max,\n \"bleu_acc\": bleu_acc,\n \"bleu_diff\": bleu_diff,\n \"rouge1_max\": rouge1_max,\n \"rouge1_acc\": rouge1_acc,\n \"rouge1_diff\": rouge1_diff,\n \"rouge2_max\": rouge2_max,\n \"rouge2_acc\": rouge2_acc,\n \"rouge2_diff\": rouge2_diff,\n \"rougeL_max\": rougeL_max,\n \"rougeL_acc\": rougeL_acc,\n \"rougeL_diff\": rougeL_diff,\n }\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "bleu_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "bleu_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge1_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rouge2_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_max",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "rougeL_diff",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "until": [
+ "\n\n"
+ ],
+ "do_sample": false
+ },
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 3.0
+ }
+ },
+ "truthfulqa_mc1": {
+ "task": "truthfulqa_mc1",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc1_targets.choices}}",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "truthfulqa_mc2": {
+ "task": "truthfulqa_mc2",
+ "group": "truthfulqa",
+ "dataset_path": "truthful_qa",
+ "dataset_name": "multiple_choice",
+ "validation_split": "validation",
+ "doc_to_text": "{% set prompt_qa = 'Q: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: I have no comment.\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.'%}{{prompt_qa + '\n\nQ: ' + question + '\nA:'}}",
+ "doc_to_target": 0,
+ "doc_to_choice": "{{mc2_targets.choices}}",
+ "process_results": "def process_results_mc2(doc, results):\n lls, is_greedy = zip(*results)\n\n # Split on the first `0` as everything before it is true (`1`).\n split_idx = list(doc[\"mc2_targets\"][\"labels\"]).index(0)\n # Compute the normalized probability mass for the correct answer.\n ll_true, ll_false = lls[:split_idx], lls[split_idx:]\n p_true, p_false = np.exp(np.array(ll_true)), np.exp(np.array(ll_false))\n p_true = p_true / (sum(p_true) + sum(p_false))\n\n return {\"acc\": sum(p_true)}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "question",
+ "metadata": {
+ "version": 2.0
+ }
+ },
+ "winogrande": {
+ "task": "winogrande",
+ "group": "Open LLM Leaderboard",
+ "dataset_path": "winogrande",
+ "dataset_name": "winogrande_xl",
+ "training_split": "train",
+ "validation_split": "validation",
+ "fewshot_split": "train",
+ "doc_to_text": "def doc_to_text(doc):\n answer_to_num = {\"1\": 0, \"2\": 1}\n return answer_to_num[doc[\"answer\"]]\n",
+ "doc_to_target": "def doc_to_target(doc):\n idx = doc[\"sentence\"].index(\"_\") + 1\n return doc[\"sentence\"][idx:].strip()\n",
+ "doc_to_choice": "def doc_to_choice(doc):\n idx = doc[\"sentence\"].index(\"_\")\n options = [doc[\"option1\"], doc[\"option2\"]]\n return [doc[\"sentence\"][:idx] + opt for opt in options]\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 5,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": true,
+ "doc_to_decontamination_query": "sentence",
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "arc_challenge": 1.0,
+ "eq_bench": 2.1,
+ "gsm8k": 3.0,
+ "hellaswag": 1.0,
+ "mmlu_abstract_algebra": 0.0,
+ "mmlu_anatomy": 0.0,
+ "mmlu_astronomy": 0.0,
+ "mmlu_business_ethics": 0.0,
+ "mmlu_clinical_knowledge": 0.0,
+ "mmlu_college_biology": 0.0,
+ "mmlu_college_chemistry": 0.0,
+ "mmlu_college_computer_science": 0.0,
+ "mmlu_college_mathematics": 0.0,
+ "mmlu_college_medicine": 0.0,
+ "mmlu_college_physics": 0.0,
+ "mmlu_computer_security": 0.0,
+ "mmlu_conceptual_physics": 0.0,
+ "mmlu_econometrics": 0.0,
+ "mmlu_electrical_engineering": 0.0,
+ "mmlu_elementary_mathematics": 0.0,
+ "mmlu_formal_logic": 0.0,
+ "mmlu_global_facts": 0.0,
+ "mmlu_high_school_biology": 0.0,
+ "mmlu_high_school_chemistry": 0.0,
+ "mmlu_high_school_computer_science": 0.0,
+ "mmlu_high_school_european_history": 0.0,
+ "mmlu_high_school_geography": 0.0,
+ "mmlu_high_school_government_and_politics": 0.0,
+ "mmlu_high_school_macroeconomics": 0.0,
+ "mmlu_high_school_mathematics": 0.0,
+ "mmlu_high_school_microeconomics": 0.0,
+ "mmlu_high_school_physics": 0.0,
+ "mmlu_high_school_psychology": 0.0,
+ "mmlu_high_school_statistics": 0.0,
+ "mmlu_high_school_us_history": 0.0,
+ "mmlu_high_school_world_history": 0.0,
+ "mmlu_human_aging": 0.0,
+ "mmlu_human_sexuality": 0.0,
+ "mmlu_international_law": 0.0,
+ "mmlu_jurisprudence": 0.0,
+ "mmlu_logical_fallacies": 0.0,
+ "mmlu_machine_learning": 0.0,
+ "mmlu_management": 0.0,
+ "mmlu_marketing": 0.0,
+ "mmlu_medical_genetics": 0.0,
+ "mmlu_miscellaneous": 0.0,
+ "mmlu_moral_disputes": 0.0,
+ "mmlu_moral_scenarios": 0.0,
+ "mmlu_nutrition": 0.0,
+ "mmlu_philosophy": 0.0,
+ "mmlu_prehistory": 0.0,
+ "mmlu_professional_accounting": 0.0,
+ "mmlu_professional_law": 0.0,
+ "mmlu_professional_medicine": 0.0,
+ "mmlu_professional_psychology": 0.0,
+ "mmlu_public_relations": 0.0,
+ "mmlu_security_studies": 0.0,
+ "mmlu_sociology": 0.0,
+ "mmlu_us_foreign_policy": 0.0,
+ "mmlu_virology": 0.0,
+ "mmlu_world_religions": 0.0,
+ "truthfulqa_gen": 3.0,
+ "truthfulqa_mc1": 2.0,
+ "truthfulqa_mc2": 2.0,
+ "winogrande": 1.0
+ },
+ "n-shot": {
+ "Open LLM Leaderboard": 5,
+ "arc_challenge": 25,
+ "eq_bench": 0,
+ "gsm8k": 5,
+ "hellaswag": 10,
+ "mmlu": 0,
+ "mmlu_abstract_algebra": 5,
+ "mmlu_anatomy": 5,
+ "mmlu_astronomy": 5,
+ "mmlu_business_ethics": 5,
+ "mmlu_clinical_knowledge": 5,
+ "mmlu_college_biology": 5,
+ "mmlu_college_chemistry": 5,
+ "mmlu_college_computer_science": 5,
+ "mmlu_college_mathematics": 5,
+ "mmlu_college_medicine": 5,
+ "mmlu_college_physics": 5,
+ "mmlu_computer_security": 5,
+ "mmlu_conceptual_physics": 5,
+ "mmlu_econometrics": 5,
+ "mmlu_electrical_engineering": 5,
+ "mmlu_elementary_mathematics": 5,
+ "mmlu_formal_logic": 5,
+ "mmlu_global_facts": 5,
+ "mmlu_high_school_biology": 5,
+ "mmlu_high_school_chemistry": 5,
+ "mmlu_high_school_computer_science": 5,
+ "mmlu_high_school_european_history": 5,
+ "mmlu_high_school_geography": 5,
+ "mmlu_high_school_government_and_politics": 5,
+ "mmlu_high_school_macroeconomics": 5,
+ "mmlu_high_school_mathematics": 5,
+ "mmlu_high_school_microeconomics": 5,
+ "mmlu_high_school_physics": 5,
+ "mmlu_high_school_psychology": 5,
+ "mmlu_high_school_statistics": 5,
+ "mmlu_high_school_us_history": 5,
+ "mmlu_high_school_world_history": 5,
+ "mmlu_human_aging": 5,
+ "mmlu_human_sexuality": 5,
+ "mmlu_humanities": 5,
+ "mmlu_international_law": 5,
+ "mmlu_jurisprudence": 5,
+ "mmlu_logical_fallacies": 5,
+ "mmlu_machine_learning": 5,
+ "mmlu_management": 5,
+ "mmlu_marketing": 5,
+ "mmlu_medical_genetics": 5,
+ "mmlu_miscellaneous": 5,
+ "mmlu_moral_disputes": 5,
+ "mmlu_moral_scenarios": 5,
+ "mmlu_nutrition": 5,
+ "mmlu_other": 5,
+ "mmlu_philosophy": 5,
+ "mmlu_prehistory": 5,
+ "mmlu_professional_accounting": 5,
+ "mmlu_professional_law": 5,
+ "mmlu_professional_medicine": 5,
+ "mmlu_professional_psychology": 5,
+ "mmlu_public_relations": 5,
+ "mmlu_security_studies": 5,
+ "mmlu_social_sciences": 5,
+ "mmlu_sociology": 5,
+ "mmlu_stem": 5,
+ "mmlu_us_foreign_policy": 5,
+ "mmlu_virology": 5,
+ "mmlu_world_religions": 5,
+ "truthfulqa": 0,
+ "truthfulqa_gen": 0,
+ "truthfulqa_mc1": 0,
+ "truthfulqa_mc2": 0,
+ "winogrande": 5
+ },
+ "higher_is_better": {
+ "Open LLM Leaderboard": {
+ "exact_match": true,
+ "acc": true,
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc_norm": true
+ },
+ "arc_challenge": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "gsm8k": {
+ "exact_match": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ },
+ "mmlu": {
+ "acc": true
+ },
+ "mmlu_abstract_algebra": {
+ "acc": true
+ },
+ "mmlu_anatomy": {
+ "acc": true
+ },
+ "mmlu_astronomy": {
+ "acc": true
+ },
+ "mmlu_business_ethics": {
+ "acc": true
+ },
+ "mmlu_clinical_knowledge": {
+ "acc": true
+ },
+ "mmlu_college_biology": {
+ "acc": true
+ },
+ "mmlu_college_chemistry": {
+ "acc": true
+ },
+ "mmlu_college_computer_science": {
+ "acc": true
+ },
+ "mmlu_college_mathematics": {
+ "acc": true
+ },
+ "mmlu_college_medicine": {
+ "acc": true
+ },
+ "mmlu_college_physics": {
+ "acc": true
+ },
+ "mmlu_computer_security": {
+ "acc": true
+ },
+ "mmlu_conceptual_physics": {
+ "acc": true
+ },
+ "mmlu_econometrics": {
+ "acc": true
+ },
+ "mmlu_electrical_engineering": {
+ "acc": true
+ },
+ "mmlu_elementary_mathematics": {
+ "acc": true
+ },
+ "mmlu_formal_logic": {
+ "acc": true
+ },
+ "mmlu_global_facts": {
+ "acc": true
+ },
+ "mmlu_high_school_biology": {
+ "acc": true
+ },
+ "mmlu_high_school_chemistry": {
+ "acc": true
+ },
+ "mmlu_high_school_computer_science": {
+ "acc": true
+ },
+ "mmlu_high_school_european_history": {
+ "acc": true
+ },
+ "mmlu_high_school_geography": {
+ "acc": true
+ },
+ "mmlu_high_school_government_and_politics": {
+ "acc": true
+ },
+ "mmlu_high_school_macroeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_mathematics": {
+ "acc": true
+ },
+ "mmlu_high_school_microeconomics": {
+ "acc": true
+ },
+ "mmlu_high_school_physics": {
+ "acc": true
+ },
+ "mmlu_high_school_psychology": {
+ "acc": true
+ },
+ "mmlu_high_school_statistics": {
+ "acc": true
+ },
+ "mmlu_high_school_us_history": {
+ "acc": true
+ },
+ "mmlu_high_school_world_history": {
+ "acc": true
+ },
+ "mmlu_human_aging": {
+ "acc": true
+ },
+ "mmlu_human_sexuality": {
+ "acc": true
+ },
+ "mmlu_humanities": {
+ "acc": true
+ },
+ "mmlu_international_law": {
+ "acc": true
+ },
+ "mmlu_jurisprudence": {
+ "acc": true
+ },
+ "mmlu_logical_fallacies": {
+ "acc": true
+ },
+ "mmlu_machine_learning": {
+ "acc": true
+ },
+ "mmlu_management": {
+ "acc": true
+ },
+ "mmlu_marketing": {
+ "acc": true
+ },
+ "mmlu_medical_genetics": {
+ "acc": true
+ },
+ "mmlu_miscellaneous": {
+ "acc": true
+ },
+ "mmlu_moral_disputes": {
+ "acc": true
+ },
+ "mmlu_moral_scenarios": {
+ "acc": true
+ },
+ "mmlu_nutrition": {
+ "acc": true
+ },
+ "mmlu_other": {
+ "acc": true
+ },
+ "mmlu_philosophy": {
+ "acc": true
+ },
+ "mmlu_prehistory": {
+ "acc": true
+ },
+ "mmlu_professional_accounting": {
+ "acc": true
+ },
+ "mmlu_professional_law": {
+ "acc": true
+ },
+ "mmlu_professional_medicine": {
+ "acc": true
+ },
+ "mmlu_professional_psychology": {
+ "acc": true
+ },
+ "mmlu_public_relations": {
+ "acc": true
+ },
+ "mmlu_security_studies": {
+ "acc": true
+ },
+ "mmlu_social_sciences": {
+ "acc": true
+ },
+ "mmlu_sociology": {
+ "acc": true
+ },
+ "mmlu_stem": {
+ "acc": true
+ },
+ "mmlu_us_foreign_policy": {
+ "acc": true
+ },
+ "mmlu_virology": {
+ "acc": true
+ },
+ "mmlu_world_religions": {
+ "acc": true
+ },
+ "truthfulqa": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true,
+ "acc": true
+ },
+ "truthfulqa_gen": {
+ "bleu_max": true,
+ "bleu_acc": true,
+ "bleu_diff": true,
+ "rouge1_max": true,
+ "rouge1_acc": true,
+ "rouge1_diff": true,
+ "rouge2_max": true,
+ "rouge2_acc": true,
+ "rouge2_diff": true,
+ "rougeL_max": true,
+ "rougeL_acc": true,
+ "rougeL_diff": true
+ },
+ "truthfulqa_mc1": {
+ "acc": true
+ },
+ "truthfulqa_mc2": {
+ "acc": true
+ },
+ "winogrande": {
+ "acc": true
+ }
+ },
+ "n-samples": {
+ "gsm8k": {
+ "original": 1319,
+ "effective": 1319
+ },
+ "winogrande": {
+ "original": 1267,
+ "effective": 1267
+ },
+ "mmlu_high_school_european_history": {
+ "original": 165,
+ "effective": 165
+ },
+ "mmlu_formal_logic": {
+ "original": 126,
+ "effective": 126
+ },
+ "mmlu_moral_scenarios": {
+ "original": 895,
+ "effective": 895
+ },
+ "mmlu_moral_disputes": {
+ "original": 346,
+ "effective": 346
+ },
+ "mmlu_world_religions": {
+ "original": 171,
+ "effective": 171
+ },
+ "mmlu_high_school_world_history": {
+ "original": 237,
+ "effective": 237
+ },
+ "mmlu_logical_fallacies": {
+ "original": 163,
+ "effective": 163
+ },
+ "mmlu_international_law": {
+ "original": 121,
+ "effective": 121
+ },
+ "mmlu_philosophy": {
+ "original": 311,
+ "effective": 311
+ },
+ "mmlu_professional_law": {
+ "original": 1534,
+ "effective": 1534
+ },
+ "mmlu_high_school_us_history": {
+ "original": 204,
+ "effective": 204
+ },
+ "mmlu_prehistory": {
+ "original": 324,
+ "effective": 324
+ },
+ "mmlu_jurisprudence": {
+ "original": 108,
+ "effective": 108
+ },
+ "mmlu_high_school_psychology": {
+ "original": 545,
+ "effective": 545
+ },
+ "mmlu_sociology": {
+ "original": 201,
+ "effective": 201
+ },
+ "mmlu_high_school_government_and_politics": {
+ "original": 193,
+ "effective": 193
+ },
+ "mmlu_public_relations": {
+ "original": 110,
+ "effective": 110
+ },
+ "mmlu_high_school_macroeconomics": {
+ "original": 390,
+ "effective": 390
+ },
+ "mmlu_high_school_geography": {
+ "original": 198,
+ "effective": 198
+ },
+ "mmlu_high_school_microeconomics": {
+ "original": 238,
+ "effective": 238
+ },
+ "mmlu_security_studies": {
+ "original": 245,
+ "effective": 245
+ },
+ "mmlu_us_foreign_policy": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_professional_psychology": {
+ "original": 612,
+ "effective": 612
+ },
+ "mmlu_human_sexuality": {
+ "original": 131,
+ "effective": 131
+ },
+ "mmlu_econometrics": {
+ "original": 114,
+ "effective": 114
+ },
+ "mmlu_professional_medicine": {
+ "original": 272,
+ "effective": 272
+ },
+ "mmlu_professional_accounting": {
+ "original": 282,
+ "effective": 282
+ },
+ "mmlu_management": {
+ "original": 103,
+ "effective": 103
+ },
+ "mmlu_global_facts": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_medicine": {
+ "original": 173,
+ "effective": 173
+ },
+ "mmlu_business_ethics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_nutrition": {
+ "original": 306,
+ "effective": 306
+ },
+ "mmlu_medical_genetics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_virology": {
+ "original": 166,
+ "effective": 166
+ },
+ "mmlu_human_aging": {
+ "original": 223,
+ "effective": 223
+ },
+ "mmlu_clinical_knowledge": {
+ "original": 265,
+ "effective": 265
+ },
+ "mmlu_miscellaneous": {
+ "original": 783,
+ "effective": 783
+ },
+ "mmlu_marketing": {
+ "original": 234,
+ "effective": 234
+ },
+ "mmlu_high_school_chemistry": {
+ "original": 203,
+ "effective": 203
+ },
+ "mmlu_college_physics": {
+ "original": 102,
+ "effective": 102
+ },
+ "mmlu_college_mathematics": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_astronomy": {
+ "original": 152,
+ "effective": 152
+ },
+ "mmlu_high_school_physics": {
+ "original": 151,
+ "effective": 151
+ },
+ "mmlu_computer_security": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_elementary_mathematics": {
+ "original": 378,
+ "effective": 378
+ },
+ "mmlu_electrical_engineering": {
+ "original": 145,
+ "effective": 145
+ },
+ "mmlu_college_biology": {
+ "original": 144,
+ "effective": 144
+ },
+ "mmlu_machine_learning": {
+ "original": 112,
+ "effective": 112
+ },
+ "mmlu_high_school_biology": {
+ "original": 310,
+ "effective": 310
+ },
+ "mmlu_high_school_mathematics": {
+ "original": 270,
+ "effective": 270
+ },
+ "mmlu_anatomy": {
+ "original": 135,
+ "effective": 135
+ },
+ "mmlu_high_school_statistics": {
+ "original": 216,
+ "effective": 216
+ },
+ "mmlu_college_chemistry": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_conceptual_physics": {
+ "original": 235,
+ "effective": 235
+ },
+ "mmlu_high_school_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_college_computer_science": {
+ "original": 100,
+ "effective": 100
+ },
+ "mmlu_abstract_algebra": {
+ "original": 100,
+ "effective": 100
+ },
+ "truthfulqa_gen": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc1": {
+ "original": 817,
+ "effective": 817
+ },
+ "truthfulqa_mc2": {
+ "original": 817,
+ "effective": 817
+ },
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "arc_challenge": {
+ "original": 1172,
+ "effective": 1172
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=FallenMerick/Smart-Lemon-Cookie-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "24a18cbcb94c55811593f89026c6fe51331f4a57",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 2
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719550043.4933457,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "FallenMerick/Smart-Lemon-Cookie-7B",
+ "model_name_sanitized": "FallenMerick__Smart-Lemon-Cookie-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 102426.774034499,
+ "end_time": 138957.776397903,
+ "total_evaluation_time_seconds": "36531.00236340401"
+}
\ No newline at end of file
diff --git a/Himitsui__KuroMitsu-11B/.ipynb_checkpoints/results_2024-07-01T22-05-02.101817-checkpoint.json b/Himitsui__KuroMitsu-11B/.ipynb_checkpoints/results_2024-07-01T22-05-02.101817-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..973e44e14f2a0acff64f887349e6ad167be75f92
--- /dev/null
+++ b/Himitsui__KuroMitsu-11B/.ipynb_checkpoints/results_2024-07-01T22-05-02.101817-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6841266679944235,
+ "acc_stderr,none": 0.004639126951051454,
+ "acc_norm,none": 0.8632742481577375,
+ "acc_norm_stderr,none": 0.0034285545959502227,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 70.49864838913086,
+ "eqbench_stderr,none": 2.176357434703893,
+ "percent_parseable,none": 98.83040935672514,
+ "percent_parseable_stderr,none": 0.824589459544595,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Himitsui/KuroMitsu-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "7bd8487fc3a5c3bac022bfe8c34d2f630c123d40",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719869292.2336426,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Himitsui/KuroMitsu-11B",
+ "model_name_sanitized": "Himitsui__KuroMitsu-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 80587.309586819,
+ "end_time": 82804.031242981,
+ "total_evaluation_time_seconds": "2216.7216561620007"
+}
\ No newline at end of file
diff --git a/Himitsui__KuroMitsu-11B/results_2024-07-01T22-05-02.101817.json b/Himitsui__KuroMitsu-11B/results_2024-07-01T22-05-02.101817.json
new file mode 100644
index 0000000000000000000000000000000000000000..973e44e14f2a0acff64f887349e6ad167be75f92
--- /dev/null
+++ b/Himitsui__KuroMitsu-11B/results_2024-07-01T22-05-02.101817.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6841266679944235,
+ "acc_stderr,none": 0.004639126951051454,
+ "acc_norm,none": 0.8632742481577375,
+ "acc_norm_stderr,none": 0.0034285545959502227,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 70.49864838913086,
+ "eqbench_stderr,none": 2.176357434703893,
+ "percent_parseable,none": 98.83040935672514,
+ "percent_parseable_stderr,none": 0.824589459544595,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Himitsui/KuroMitsu-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "7bd8487fc3a5c3bac022bfe8c34d2f630c123d40",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719869292.2336426,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Himitsui/KuroMitsu-11B",
+ "model_name_sanitized": "Himitsui__KuroMitsu-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 80587.309586819,
+ "end_time": 82804.031242981,
+ "total_evaluation_time_seconds": "2216.7216561620007"
+}
\ No newline at end of file
diff --git a/HuggingFaceH4__zephyr-7b-beta/.ipynb_checkpoints/results_2024-07-02T05-33-39.653334-checkpoint.json b/HuggingFaceH4__zephyr-7b-beta/.ipynb_checkpoints/results_2024-07-02T05-33-39.653334-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..8c29a824d238cae93fe79460729b0881fcade1ba
--- /dev/null
+++ b/HuggingFaceH4__zephyr-7b-beta/.ipynb_checkpoints/results_2024-07-02T05-33-39.653334-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6398127862975503,
+ "acc_stderr,none": 0.0047907346837046125,
+ "acc_norm,none": 0.8201553475403306,
+ "acc_norm_stderr,none": 0.0038327310175919993,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 58.1357621766725,
+ "eqbench_stderr,none": 2.8035833778342916,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=HuggingFaceH4/zephyr-7b-beta,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "b70e0c9a2d9e14bd1e812d3c398e5f313e93b473",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719896897.7463036,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "HuggingFaceH4/zephyr-7b-beta",
+ "model_name_sanitized": "HuggingFaceH4__zephyr-7b-beta",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 108192.842263836,
+ "end_time": 109721.582768754,
+ "total_evaluation_time_seconds": "1528.7405049179943"
+}
\ No newline at end of file
diff --git a/HuggingFaceH4__zephyr-7b-beta/results_2024-07-02T05-33-39.653334.json b/HuggingFaceH4__zephyr-7b-beta/results_2024-07-02T05-33-39.653334.json
new file mode 100644
index 0000000000000000000000000000000000000000..8c29a824d238cae93fe79460729b0881fcade1ba
--- /dev/null
+++ b/HuggingFaceH4__zephyr-7b-beta/results_2024-07-02T05-33-39.653334.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6398127862975503,
+ "acc_stderr,none": 0.0047907346837046125,
+ "acc_norm,none": 0.8201553475403306,
+ "acc_norm_stderr,none": 0.0038327310175919993,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 58.1357621766725,
+ "eqbench_stderr,none": 2.8035833778342916,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=HuggingFaceH4/zephyr-7b-beta,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "b70e0c9a2d9e14bd1e812d3c398e5f313e93b473",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719896897.7463036,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "HuggingFaceH4/zephyr-7b-beta",
+ "model_name_sanitized": "HuggingFaceH4__zephyr-7b-beta",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 108192.842263836,
+ "end_time": 109721.582768754,
+ "total_evaluation_time_seconds": "1528.7405049179943"
+}
\ No newline at end of file
diff --git a/Intel__neural-chat-7b-v3-1/.ipynb_checkpoints/results_2024-06-27T21-55-55.507233-checkpoint.json b/Intel__neural-chat-7b-v3-1/.ipynb_checkpoints/results_2024-06-27T21-55-55.507233-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..fe1a0d12f93ccd27088fa03e9defff9d585de769
--- /dev/null
+++ b/Intel__neural-chat-7b-v3-1/.ipynb_checkpoints/results_2024-06-27T21-55-55.507233-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6323441545508863,
+ "acc_stderr,none": 0.004811815959388812,
+ "acc_norm,none": 0.7975502887870942,
+ "acc_norm_stderr,none": 0.004010043978333027,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 62.2626522660805,
+ "eqbench_stderr,none": 2.2134366454600554,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Intel/neural-chat-7b-v3-1,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c0d379a49c1c0579529d5e6f2e936ddb759552a8",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719523797.1185606,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Intel/neural-chat-7b-v3-1",
+ "model_name_sanitized": "Intel__neural-chat-7b-v3-1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 76180.270750278,
+ "end_time": 77745.57190531,
+ "total_evaluation_time_seconds": "1565.301155032008"
+}
\ No newline at end of file
diff --git a/Intel__neural-chat-7b-v3-1/results_2024-06-27T21-55-55.507233.json b/Intel__neural-chat-7b-v3-1/results_2024-06-27T21-55-55.507233.json
new file mode 100644
index 0000000000000000000000000000000000000000..fe1a0d12f93ccd27088fa03e9defff9d585de769
--- /dev/null
+++ b/Intel__neural-chat-7b-v3-1/results_2024-06-27T21-55-55.507233.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6323441545508863,
+ "acc_stderr,none": 0.004811815959388812,
+ "acc_norm,none": 0.7975502887870942,
+ "acc_norm_stderr,none": 0.004010043978333027,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 62.2626522660805,
+ "eqbench_stderr,none": 2.2134366454600554,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Intel/neural-chat-7b-v3-1,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c0d379a49c1c0579529d5e6f2e936ddb759552a8",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719523797.1185606,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Intel/neural-chat-7b-v3-1",
+ "model_name_sanitized": "Intel__neural-chat-7b-v3-1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 76180.270750278,
+ "end_time": 77745.57190531,
+ "total_evaluation_time_seconds": "1565.301155032008"
+}
\ No newline at end of file
diff --git a/KatyTheCutie__LemonadeRP-4.5.3/.ipynb_checkpoints/results_2024-07-02T08-08-46.956689-checkpoint.json b/KatyTheCutie__LemonadeRP-4.5.3/.ipynb_checkpoints/results_2024-07-02T08-08-46.956689-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4b426fbab2738a869cab30d84a76e65820785fe
--- /dev/null
+++ b/KatyTheCutie__LemonadeRP-4.5.3/.ipynb_checkpoints/results_2024-07-02T08-08-46.956689-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6444931288587931,
+ "acc_stderr,none": 0.004776883632722606,
+ "acc_norm,none": 0.8265285799641505,
+ "acc_norm_stderr,none": 0.0037788044746058284,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 63.22759969511479,
+ "eqbench_stderr,none": 2.4086520534332245,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=KatyTheCutie/LemonadeRP-4.5.3,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "3f2309618a48035253889f01d4df2d7f1e81b730",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719906146.1677766,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "KatyTheCutie/LemonadeRP-4.5.3",
+ "model_name_sanitized": "KatyTheCutie__LemonadeRP-4.5.3",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 117441.278859038,
+ "end_time": 119028.886089565,
+ "total_evaluation_time_seconds": "1587.6072305270063"
+}
\ No newline at end of file
diff --git a/KatyTheCutie__LemonadeRP-4.5.3/results_2024-07-02T08-08-46.956689.json b/KatyTheCutie__LemonadeRP-4.5.3/results_2024-07-02T08-08-46.956689.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4b426fbab2738a869cab30d84a76e65820785fe
--- /dev/null
+++ b/KatyTheCutie__LemonadeRP-4.5.3/results_2024-07-02T08-08-46.956689.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6444931288587931,
+ "acc_stderr,none": 0.004776883632722606,
+ "acc_norm,none": 0.8265285799641505,
+ "acc_norm_stderr,none": 0.0037788044746058284,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 63.22759969511479,
+ "eqbench_stderr,none": 2.4086520534332245,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=KatyTheCutie/LemonadeRP-4.5.3,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "3f2309618a48035253889f01d4df2d7f1e81b730",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719906146.1677766,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "KatyTheCutie/LemonadeRP-4.5.3",
+ "model_name_sanitized": "KatyTheCutie__LemonadeRP-4.5.3",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 117441.278859038,
+ "end_time": 119028.886089565,
+ "total_evaluation_time_seconds": "1587.6072305270063"
+}
\ No newline at end of file
diff --git a/KoboldAI__Mistral-7B-Erebus-v3/.ipynb_checkpoints/results_2024-06-28T02-01-18.290687-checkpoint.json b/KoboldAI__Mistral-7B-Erebus-v3/.ipynb_checkpoints/results_2024-06-28T02-01-18.290687-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..0f7ff3d4facd0f716332c869979bc5ef32076ac4
--- /dev/null
+++ b/KoboldAI__Mistral-7B-Erebus-v3/.ipynb_checkpoints/results_2024-06-28T02-01-18.290687-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.5837482573192591,
+ "acc_stderr,none": 0.0049192891130275095,
+ "acc_norm,none": 0.7665803624775941,
+ "acc_norm_stderr,none": 0.004221424792919153,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 18.19761609584577,
+ "eqbench_stderr,none": 3.474273216617232,
+ "percent_parseable,none": 97.6608187134503,
+ "percent_parseable_stderr,none": 1.1592247905734945,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=KoboldAI/Mistral-7B-Erebus-v3,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "476c2eed031028f2fcfb9b8d0115b83363cec904",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719538494.5098195,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "KoboldAI/Mistral-7B-Erebus-v3",
+ "model_name_sanitized": "KoboldAI__Mistral-7B-Erebus-v3",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 90877.776869387,
+ "end_time": 92468.355496828,
+ "total_evaluation_time_seconds": "1590.578627440991"
+}
\ No newline at end of file
diff --git a/KoboldAI__Mistral-7B-Erebus-v3/results_2024-06-28T02-01-18.290687.json b/KoboldAI__Mistral-7B-Erebus-v3/results_2024-06-28T02-01-18.290687.json
new file mode 100644
index 0000000000000000000000000000000000000000..0f7ff3d4facd0f716332c869979bc5ef32076ac4
--- /dev/null
+++ b/KoboldAI__Mistral-7B-Erebus-v3/results_2024-06-28T02-01-18.290687.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.5837482573192591,
+ "acc_stderr,none": 0.0049192891130275095,
+ "acc_norm,none": 0.7665803624775941,
+ "acc_norm_stderr,none": 0.004221424792919153,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 18.19761609584577,
+ "eqbench_stderr,none": 3.474273216617232,
+ "percent_parseable,none": 97.6608187134503,
+ "percent_parseable_stderr,none": 1.1592247905734945,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=KoboldAI/Mistral-7B-Erebus-v3,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "476c2eed031028f2fcfb9b8d0115b83363cec904",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719538494.5098195,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "KoboldAI/Mistral-7B-Erebus-v3",
+ "model_name_sanitized": "KoboldAI__Mistral-7B-Erebus-v3",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 90877.776869387,
+ "end_time": 92468.355496828,
+ "total_evaluation_time_seconds": "1590.578627440991"
+}
\ No newline at end of file
diff --git a/KoboldAI__Mistral-7B-Holodeck-1/.ipynb_checkpoints/results_2024-06-28T01-04-59.368025-checkpoint.json b/KoboldAI__Mistral-7B-Holodeck-1/.ipynb_checkpoints/results_2024-06-28T01-04-59.368025-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a377554e048161fa34a22f7d4718198ed9d71be
--- /dev/null
+++ b/KoboldAI__Mistral-7B-Holodeck-1/.ipynb_checkpoints/results_2024-06-28T01-04-59.368025-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6026687910774746,
+ "acc_stderr,none": 0.004883455188908956,
+ "acc_norm,none": 0.7918741286596296,
+ "acc_norm_stderr,none": 0.0040513767194979506,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 2.099910527905425,
+ "eqbench_stderr,none": 2.491702523648299,
+ "percent_parseable,none": 98.24561403508773,
+ "percent_parseable_stderr,none": 1.0069193740062292,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=KoboldAI/Mistral-7B-Holodeck-1,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "76057cc5c1923921162133c81ae7ca0e92755810",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719535119.7065547,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "KoboldAI/Mistral-7B-Holodeck-1",
+ "model_name_sanitized": "KoboldAI__Mistral-7B-Holodeck-1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 87502.926965946,
+ "end_time": 89089.432844292,
+ "total_evaluation_time_seconds": "1586.5058783459972"
+}
\ No newline at end of file
diff --git a/KoboldAI__Mistral-7B-Holodeck-1/results_2024-06-28T01-04-59.368025.json b/KoboldAI__Mistral-7B-Holodeck-1/results_2024-06-28T01-04-59.368025.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a377554e048161fa34a22f7d4718198ed9d71be
--- /dev/null
+++ b/KoboldAI__Mistral-7B-Holodeck-1/results_2024-06-28T01-04-59.368025.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6026687910774746,
+ "acc_stderr,none": 0.004883455188908956,
+ "acc_norm,none": 0.7918741286596296,
+ "acc_norm_stderr,none": 0.0040513767194979506,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 2.099910527905425,
+ "eqbench_stderr,none": 2.491702523648299,
+ "percent_parseable,none": 98.24561403508773,
+ "percent_parseable_stderr,none": 1.0069193740062292,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=KoboldAI/Mistral-7B-Holodeck-1,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "76057cc5c1923921162133c81ae7ca0e92755810",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719535119.7065547,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "KoboldAI/Mistral-7B-Holodeck-1",
+ "model_name_sanitized": "KoboldAI__Mistral-7B-Holodeck-1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 87502.926965946,
+ "end_time": 89089.432844292,
+ "total_evaluation_time_seconds": "1586.5058783459972"
+}
\ No newline at end of file
diff --git a/NeverSleep__Mistral-11B-SynthIAirOmniMix/.ipynb_checkpoints/results_2024-07-01T23-28-29.609057-checkpoint.json b/NeverSleep__Mistral-11B-SynthIAirOmniMix/.ipynb_checkpoints/results_2024-07-01T23-28-29.609057-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..6ec670cd986713dcd4717678c8a5490fe46770e4
--- /dev/null
+++ b/NeverSleep__Mistral-11B-SynthIAirOmniMix/.ipynb_checkpoints/results_2024-07-01T23-28-29.609057-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6276638119896435,
+ "acc_stderr,none": 0.00482439307682663,
+ "acc_norm,none": 0.8157737502489544,
+ "acc_norm_stderr,none": 0.0038687630867377757,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 55.19425772388985,
+ "eqbench_stderr,none": 2.8835673004687723,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NeverSleep/Mistral-11B-SynthIAirOmniMix,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "639cca3fff101891a70eb2f12fc835598ed50eb0",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719874190.80644,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NeverSleep/Mistral-11B-SynthIAirOmniMix",
+ "model_name_sanitized": "NeverSleep__Mistral-11B-SynthIAirOmniMix",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 85485.875698911,
+ "end_time": 87811.538471,
+ "total_evaluation_time_seconds": "2325.66277208901"
+}
\ No newline at end of file
diff --git a/NeverSleep__Mistral-11B-SynthIAirOmniMix/results_2024-07-01T23-28-29.609057.json b/NeverSleep__Mistral-11B-SynthIAirOmniMix/results_2024-07-01T23-28-29.609057.json
new file mode 100644
index 0000000000000000000000000000000000000000..6ec670cd986713dcd4717678c8a5490fe46770e4
--- /dev/null
+++ b/NeverSleep__Mistral-11B-SynthIAirOmniMix/results_2024-07-01T23-28-29.609057.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6276638119896435,
+ "acc_stderr,none": 0.00482439307682663,
+ "acc_norm,none": 0.8157737502489544,
+ "acc_norm_stderr,none": 0.0038687630867377757,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 55.19425772388985,
+ "eqbench_stderr,none": 2.8835673004687723,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NeverSleep/Mistral-11B-SynthIAirOmniMix,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "639cca3fff101891a70eb2f12fc835598ed50eb0",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719874190.80644,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NeverSleep/Mistral-11B-SynthIAirOmniMix",
+ "model_name_sanitized": "NeverSleep__Mistral-11B-SynthIAirOmniMix",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 85485.875698911,
+ "end_time": 87811.538471,
+ "total_evaluation_time_seconds": "2325.66277208901"
+}
\ No newline at end of file
diff --git a/Norquinal__Mistral-7B-claude-chat/.ipynb_checkpoints/results_2024-07-02T07-25-06.524375-checkpoint.json b/Norquinal__Mistral-7B-claude-chat/.ipynb_checkpoints/results_2024-07-02T07-25-06.524375-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..82604102a2e9825262ac9572bf5c903d34e7be9f
--- /dev/null
+++ b/Norquinal__Mistral-7B-claude-chat/.ipynb_checkpoints/results_2024-07-02T07-25-06.524375-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6319458275243975,
+ "acc_stderr,none": 0.004812905279066437,
+ "acc_norm,none": 0.8306114319856602,
+ "acc_norm_stderr,none": 0.003743281749373698,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 16.33570389924275,
+ "eqbench_stderr,none": 2.9383702981155455,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374279,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Norquinal/Mistral-7B-claude-chat,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "781aa96659a06a1c70fb1f27f68c7db3f5ab760e",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719903556.2722895,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Norquinal/Mistral-7B-claude-chat",
+ "model_name_sanitized": "Norquinal__Mistral-7B-claude-chat",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 114851.308841458,
+ "end_time": 116408.453788708,
+ "total_evaluation_time_seconds": "1557.1449472499953"
+}
\ No newline at end of file
diff --git a/Norquinal__Mistral-7B-claude-chat/results_2024-07-02T07-25-06.524375.json b/Norquinal__Mistral-7B-claude-chat/results_2024-07-02T07-25-06.524375.json
new file mode 100644
index 0000000000000000000000000000000000000000..82604102a2e9825262ac9572bf5c903d34e7be9f
--- /dev/null
+++ b/Norquinal__Mistral-7B-claude-chat/results_2024-07-02T07-25-06.524375.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6319458275243975,
+ "acc_stderr,none": 0.004812905279066437,
+ "acc_norm,none": 0.8306114319856602,
+ "acc_norm_stderr,none": 0.003743281749373698,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 16.33570389924275,
+ "eqbench_stderr,none": 2.9383702981155455,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374279,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Norquinal/Mistral-7B-claude-chat,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "781aa96659a06a1c70fb1f27f68c7db3f5ab760e",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719903556.2722895,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Norquinal/Mistral-7B-claude-chat",
+ "model_name_sanitized": "Norquinal__Mistral-7B-claude-chat",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 114851.308841458,
+ "end_time": 116408.453788708,
+ "total_evaluation_time_seconds": "1557.1449472499953"
+}
\ No newline at end of file
diff --git a/NousResearch__Hermes-2-Pro-Mistral-7B/.ipynb_checkpoints/results_2024-06-28T00-36-44.931474-checkpoint.json b/NousResearch__Hermes-2-Pro-Mistral-7B/.ipynb_checkpoints/results_2024-06-28T00-36-44.931474-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..bd970e1db77010cc736e2685c5c573cde98b76ca
--- /dev/null
+++ b/NousResearch__Hermes-2-Pro-Mistral-7B/.ipynb_checkpoints/results_2024-06-28T00-36-44.931474-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6267675761800439,
+ "acc_stderr,none": 0.004826746160830164,
+ "acc_norm,none": 0.8055168293168692,
+ "acc_norm_stderr,none": 0.003949933997955457,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.92538496559615,
+ "eqbench_stderr,none": 2.278499695256187,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NousResearch/Hermes-2-Pro-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241994240,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "09317b1d8da639b5d9af77c06aa17cde0f0f91c0",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719533491.7175071,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NousResearch/Hermes-2-Pro-Mistral-7B",
+ "model_name_sanitized": "NousResearch__Hermes-2-Pro-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 85874.893829605,
+ "end_time": 87394.996271898,
+ "total_evaluation_time_seconds": "1520.1024422929913"
+}
\ No newline at end of file
diff --git a/NousResearch__Hermes-2-Pro-Mistral-7B/results_2024-06-28T00-36-44.931474.json b/NousResearch__Hermes-2-Pro-Mistral-7B/results_2024-06-28T00-36-44.931474.json
new file mode 100644
index 0000000000000000000000000000000000000000..bd970e1db77010cc736e2685c5c573cde98b76ca
--- /dev/null
+++ b/NousResearch__Hermes-2-Pro-Mistral-7B/results_2024-06-28T00-36-44.931474.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6267675761800439,
+ "acc_stderr,none": 0.004826746160830164,
+ "acc_norm,none": 0.8055168293168692,
+ "acc_norm_stderr,none": 0.003949933997955457,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.92538496559615,
+ "eqbench_stderr,none": 2.278499695256187,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NousResearch/Hermes-2-Pro-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241994240,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "09317b1d8da639b5d9af77c06aa17cde0f0f91c0",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719533491.7175071,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NousResearch/Hermes-2-Pro-Mistral-7B",
+ "model_name_sanitized": "NousResearch__Hermes-2-Pro-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 85874.893829605,
+ "end_time": 87394.996271898,
+ "total_evaluation_time_seconds": "1520.1024422929913"
+}
\ No newline at end of file
diff --git a/NousResearch__Nous-Capybara-7B-V1.9/.ipynb_checkpoints/results_2024-07-02T07-40-59.772360-checkpoint.json b/NousResearch__Nous-Capybara-7B-V1.9/.ipynb_checkpoints/results_2024-07-02T07-40-59.772360-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6d7d7f938cbfd330b0bc32a88823a45058948f6
--- /dev/null
+++ b/NousResearch__Nous-Capybara-7B-V1.9/.ipynb_checkpoints/results_2024-07-02T07-40-59.772360-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6075482971519618,
+ "acc_stderr,none": 0.004872984492967986,
+ "acc_norm,none": 0.7870942043417646,
+ "acc_norm_stderr,none": 0.004085249783499773,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 19.249537416299493,
+ "eqbench_stderr,none": 3.5871216396534114,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NousResearch/Nous-Capybara-7B-V1.9,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "ea08e10fb568f676e19e810d11d4a5ee6b3f02b3",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719904544.6768596,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NousResearch/Nous-Capybara-7B-V1.9",
+ "model_name_sanitized": "NousResearch__Nous-Capybara-7B-V1.9",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 115839.811088593,
+ "end_time": 117361.70177151,
+ "total_evaluation_time_seconds": "1521.890682917001"
+}
\ No newline at end of file
diff --git a/NousResearch__Nous-Capybara-7B-V1.9/results_2024-07-02T07-40-59.772360.json b/NousResearch__Nous-Capybara-7B-V1.9/results_2024-07-02T07-40-59.772360.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6d7d7f938cbfd330b0bc32a88823a45058948f6
--- /dev/null
+++ b/NousResearch__Nous-Capybara-7B-V1.9/results_2024-07-02T07-40-59.772360.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6075482971519618,
+ "acc_stderr,none": 0.004872984492967986,
+ "acc_norm,none": 0.7870942043417646,
+ "acc_norm_stderr,none": 0.004085249783499773,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 19.249537416299493,
+ "eqbench_stderr,none": 3.5871216396534114,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NousResearch/Nous-Capybara-7B-V1.9,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "ea08e10fb568f676e19e810d11d4a5ee6b3f02b3",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719904544.6768596,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NousResearch/Nous-Capybara-7B-V1.9",
+ "model_name_sanitized": "NousResearch__Nous-Capybara-7B-V1.9",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 115839.811088593,
+ "end_time": 117361.70177151,
+ "total_evaluation_time_seconds": "1521.890682917001"
+}
\ No newline at end of file
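
The "process_results" string above is the complete eq_bench scorer (task version 2.1 per the metadata), serialized onto a single line. For readability, here is a self-contained restatement: imports made explicit, ast.literal_eval substituted for the bare eval, and the two emotion checks folded into one; the arithmetic is unchanged from the embedded function.

    import math
    import re
    from ast import literal_eval

    def calculate_score_fullscale(reference_answer: str, completion: str) -> dict:
        reference = literal_eval(reference_answer)  # the dataset stores the reference dict as a string
        user = dict(re.findall(r"(\w+):\s+(\d+)", completion))
        ref_emotions = {reference[f"emotion{i}"] for i in range(1, 5)}
        # Unparseable unless exactly the four reference emotions were rated.
        if len(user) != 4 or set(user) != ref_emotions:
            return {"eqbench": 0, "percent_parseable": 0}
        difference_tally = 0.0
        for i in range(1, 5):
            d = abs(float(user[reference[f"emotion{i}"]])
                    - float(reference[f"emotion{i}_score"]))
            if d == 0:
                scaled = 0.0
            elif d <= 5:
                # S-shaped penalty: differences under ~2 cost almost nothing,
                # differences near 5 cost close to the full 6.5.
                scaled = 6.5 / (1 + math.e ** (-1.2 * (d - 4)))
            else:
                scaled = d
            difference_tally += scaled
        # 0.7477 calibrates the scale so that answering randomly scores ~0.
        return {"eqbench": (10 - difference_tally * 0.7477) * 10,
                "percent_parseable": 100}

    # Worked example with a made-up reference answer: an exact match scores 100.
    ref = ("{'emotion1': 'surprised', 'emotion1_score': '7', "
           "'emotion2': 'confused', 'emotion2_score': '5', "
           "'emotion3': 'angry', 'emotion3_score': '0', "
           "'emotion4': 'forgiving', 'emotion4_score': '1'}")
    print(calculate_score_fullscale(ref, "surprised: 7\nconfused: 5\nangry: 0\nforgiving: 1"))
    # -> {'eqbench': 100.0, 'percent_parseable': 100}
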
diff --git a/NousResearch__Nous-Hermes-2-SOLAR-10.7B/results_2024-07-01T22-46-11.267534.json b/NousResearch__Nous-Hermes-2-SOLAR-10.7B/results_2024-07-01T22-46-11.267534.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e67ebb326ebcc1c3ed0be90cb7cb4ed784d2016
--- /dev/null
+++ b/NousResearch__Nous-Hermes-2-SOLAR-10.7B/results_2024-07-01T22-46-11.267534.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6467835092611034,
+ "acc_stderr,none": 0.004769924131304646,
+ "acc_norm,none": 0.8324039036048596,
+ "acc_norm_stderr,none": 0.0037274387865133193,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 63.524669890379585,
+ "eqbench_stderr,none": 2.5941253832883335,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=NousResearch/Nous-Hermes-2-SOLAR-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731540480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "14c1fbe2f71acdcd58247b30d5439bd572d52386",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719871760.8771381,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "NousResearch/Nous-Hermes-2-SOLAR-10.7B",
+ "model_name_sanitized": "NousResearch__Nous-Hermes-2-SOLAR-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 83055.962424888,
+ "end_time": 85273.196902306,
+ "total_evaluation_time_seconds": "2217.2344774179946"
+}
\ No newline at end of file
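
The hellaswag "process_docs" above calls a preprocess helper that these files do not include. A self-contained sketch, assuming preprocess matches the standard harness implementation (strip the bracketed artifacts left over from the WikiHow portion of the dataset and collapse double spaces):

    import re
    import datasets

    def preprocess(text: str) -> str:
        # Assumed behavior of the harness helper, not recorded in these files.
        text = text.strip().replace(" [title]", ". ")
        text = re.sub(r"\[.*?\]", "", text)
        return text.replace("  ", " ")

    def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
        def _process_doc(doc):
            # Stitch the two context fields together, then build the
            # query/choices/gold triple the multiple_choice task consumes.
            ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
            return {
                "query": preprocess(doc["activity_label"] + ": " + ctx),
                "choices": [preprocess(ending) for ending in doc["endings"]],
                "gold": int(doc["label"]),
            }
        return dataset.map(_process_doc)
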
diff --git a/Open-Orca__Mistral-7B-OpenOrca/results_2024-06-27T21-00-54.306241.json b/Open-Orca__Mistral-7B-OpenOrca/results_2024-06-27T21-00-54.306241.json
new file mode 100644
index 0000000000000000000000000000000000000000..9bf85d5da14f7c1da3bc56ca16bae6d1c443d395
--- /dev/null
+++ b/Open-Orca__Mistral-7B-OpenOrca/results_2024-06-27T21-00-54.306241.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6379207329217288,
+ "acc_stderr,none": 0.004796193584930065,
+ "acc_norm,none": 0.8166699860585541,
+ "acc_norm_stderr,none": 0.0038614605262315377,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 63.978950638437865,
+ "eqbench_stderr,none": 2.3824356593314344,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374284,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Open-Orca/Mistral-7B-OpenOrca,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "4a37328cef00f524d3791b1c0cc559a3cc6af14d",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719520557.5287726,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Open-Orca/Mistral-7B-OpenOrca",
+ "model_name_sanitized": "Open-Orca__Mistral-7B-OpenOrca",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 72940.705278236,
+ "end_time": 74444.371073833,
+ "total_evaluation_time_seconds": "1503.6657955970004"
+}
\ No newline at end of file
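
Every results file in this commit shares the same schema, so the headline numbers can be tabulated directly from the JSON. A minimal sketch (summarize is a hypothetical helper, not part of the harness; it assumes the ModelOrg__ModelName/results_*.json layout used here):

    import json
    from pathlib import Path

    def summarize(root: str = ".") -> None:
        # One row per model: EQ-Bench score and HellaSwag acc_norm.
        for path in sorted(Path(root).glob("*/results_*.json")):
            data = json.loads(path.read_text())
            eq = data["results"]["eq_bench"]["eqbench,none"]
            hs = data["results"]["hellaswag"]["acc_norm,none"]
            print(f'{data["model_name"]:<45} eq_bench={eq:6.2f} hellaswag acc_norm={hs:.4f}')

    summarize()
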
diff --git a/SanjiWatsuki__Kunoichi-7B/results_2024-06-27T20-34-47.197919.json b/SanjiWatsuki__Kunoichi-7B/results_2024-06-27T20-34-47.197919.json
new file mode 100644
index 0000000000000000000000000000000000000000..4c41c912a88eb717f5501954f491d17bf54e2dbd
--- /dev/null
+++ b/SanjiWatsuki__Kunoichi-7B/results_2024-06-27T20-34-47.197919.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6803425612427804,
+ "acc_stderr,none": 0.004653907471785688,
+ "acc_norm,none": 0.8525194184425413,
+ "acc_norm_stderr,none": 0.003538596773704852,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 72.35673723130577,
+ "eqbench_stderr,none": 1.842888264461036,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=SanjiWatsuki/Kunoichi-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "b2c23b9d0036e6e74e5f61de74776e9091956c83",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719519064.691441,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "SanjiWatsuki/Kunoichi-7B",
+ "model_name_sanitized": "SanjiWatsuki__Kunoichi-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 71447.838088771,
+ "end_time": 72877.26274353,
+ "total_evaluation_time_seconds": "1429.424654759001"
+}
\ No newline at end of file
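
Note: every results file in this diff embeds the EQ-Bench scorer as a single escaped string in its "process_results" field. For reference, the same routine reads as follows in plain Python — unescaped, with the re/math imports the harness supplies at runtime made explicit and the failure-branch print diagnostics omitted; the scoring logic itself is unchanged from the recorded string:

    import math
    import re

    def calculate_score_fullscale(docs, results):
        # As recorded, the harness parses the serialized reference dict with
        # eval(); ast.literal_eval would be the safer equivalent.
        reference = eval(docs["reference_answer_fullscale"])
        # Parse "Emotion: score" pairs out of the model's completion.
        user = dict(re.findall(r"(\w+):\s+(\d+)", results[0]))
        # The answer must name exactly the four reference emotions to count
        # as parseable.
        if len(user.items()) != 4:
            return {"eqbench": 0, "percent_parseable": 0}
        emotions_dict = {}
        for emotion, user_emotion_score in user.items():
            for i in range(1, 5):
                if emotion == reference[f"emotion{i}"]:
                    emotions_dict[emotion] = True
        if len(emotions_dict) != 4:
            return {"eqbench": 0, "percent_parseable": 0}

        # Tally of difference from the reference answers for this question.
        difference_tally = 0

        for emotion, user_emotion_score in user.items():
            for i in range(1, 5):
                if emotion == reference[f"emotion{i}"]:
                    # d is a value between 0 and 10.
                    d = abs(float(user_emotion_score) - float(reference[f"emotion{i}_score"]))
                    if d == 0:
                        scaled_difference = 0
                    elif d <= 5:
                        # S-shaped scaling: 6.5 / (1 + e^(-1.2 * (d - 4))).
                        scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))
                    else:
                        scaled_difference = d
                    difference_tally += scaled_difference

        # Invert the tally so answers closer to the reference score higher; the
        # constant is chosen so that answering randomly scores about zero.
        adjust_const = 0.7477
        final_score = 10 - (difference_tally * adjust_const)
        final_score_percent = final_score * 10

        return {"eqbench": final_score_percent, "percent_parseable": 100}

The S-curve keeps near-misses cheap (a difference of 1 costs about 0.17 points) and joins the linear branch almost continuously at d = 5 (about 5.0), so only large disagreements with the reference weigh heavily.
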
diff --git a/SanjiWatsuki__Loyal-Macaroni-Maid-7B/results_2024-07-02T06-14-04.529485.json b/SanjiWatsuki__Loyal-Macaroni-Maid-7B/results_2024-07-02T06-14-04.529485.json
new file mode 100644
index 0000000000000000000000000000000000000000..90b10cc09d43f3b06138d65b5a7117c89fe90caa
--- /dev/null
+++ b/SanjiWatsuki__Loyal-Macaroni-Maid-7B/results_2024-07-02T06-14-04.529485.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6699860585540729,
+ "acc_stderr,none": 0.004692567655961757,
+ "acc_norm,none": 0.8453495319657439,
+ "acc_norm_stderr,none": 0.0036083220651419597,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 73.66931196891234,
+ "eqbench_stderr,none": 1.6676417973789068,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=SanjiWatsuki/Loyal-Macaroni-Maid-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "831837e474f6c474f68f3c31a62ef7eb01b9f5b7",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719899408.9462144,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "SanjiWatsuki/Loyal-Macaroni-Maid-7B",
+ "model_name_sanitized": "SanjiWatsuki__Loyal-Macaroni-Maid-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 110704.019752883,
+ "end_time": 112146.458918638,
+ "total_evaluation_time_seconds": "1442.4391657550004"
+}
\ No newline at end of file
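
The HellaSwag side of each run is likewise self-describing: zero-shot multiple choice over the 10,042 validation docs, with prompts built by the "process_docs" string recorded above. Rendered as plain Python, with the preprocess helper it calls filled in from lm-eval's hellaswag utilities (reproduced from memory here, so treat the helper as an assumption and verify it against the installed harness version):

    import re
    import datasets

    def preprocess(text):
        text = text.strip()
        # NOTE: brackets are artifacts of the WikiHow portion of HellaSwag.
        text = text.replace(" [title]", ". ")
        text = re.sub(r"\[.*?\]", "", text)
        text = text.replace("  ", " ")
        return text

    def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
        def _process_doc(doc):
            # Join the two context halves, then prefix the activity label
            # as a topic hint.
            ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
            out_doc = {
                "query": preprocess(doc["activity_label"] + ": " + ctx),
                "choices": [preprocess(ending) for ending in doc["endings"]],
                "gold": int(doc["label"]),
            }
            return out_doc

        return dataset.map(_process_doc)
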
diff --git a/SanjiWatsuki__Silicon-Maid-7B/results_2024-07-02T06-55-56.426785.json b/SanjiWatsuki__Silicon-Maid-7B/results_2024-07-02T06-55-56.426785.json
new file mode 100644
index 0000000000000000000000000000000000000000..baace808a5a0d07a04e9177bb509393a9fcd2a1d
--- /dev/null
+++ b/SanjiWatsuki__Silicon-Maid-7B/results_2024-07-02T06-55-56.426785.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6676956781517626,
+ "acc_stderr,none": 0.0047007677417355885,
+ "acc_norm,none": 0.8455486954789883,
+ "acc_norm_stderr,none": 0.0036064226236399086,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.74376746724828,
+ "eqbench_stderr,none": 1.9296099363146424,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=SanjiWatsuki/Silicon-Maid-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "4e43d81f3fff1091df7cb2d85e9e306d25235701",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719901923.6482406,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "SanjiWatsuki/Silicon-Maid-7B",
+ "model_name_sanitized": "SanjiWatsuki__Silicon-Maid-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 113218.750420381,
+ "end_time": 114658.35620432,
+ "total_evaluation_time_seconds": "1439.6057839390123"
+}
\ No newline at end of file
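
Because every file follows the same schema, the headline numbers can be tabulated across model directories with a few lines of Python; a minimal sketch, assuming the directory layout shown in this diff:

    import glob
    import json

    # Gather (model, EQ-Bench, HellaSwag acc_norm) from every results file,
    # skipping any duplicate copies under .ipynb_checkpoints/.
    rows = []
    for path in glob.glob("**/results_*.json", recursive=True):
        if ".ipynb_checkpoints" in path:
            continue
        with open(path) as f:
            data = json.load(f)
        rows.append((
            data["model_name"],
            data["results"]["eq_bench"]["eqbench,none"],
            data["results"]["hellaswag"]["acc_norm,none"],
        ))

    # Best EQ-Bench score first.
    for name, eqb, acc_norm in sorted(rows, key=lambda r: -r[1]):
        print(f"{name:40s} eqbench={eqb:6.2f} hellaswag acc_norm={acc_norm:.4f}")

On the files in this section that ordering is Loyal-Macaroni-Maid (73.67), Kunoichi (72.36), Silicon-Maid (68.74), then Fimbulvetr (65.42) on EQ-Bench, while Fimbulvetr posts the highest HellaSwag acc_norm of the four (0.8581) despite the lowest EQ-Bench score.
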
diff --git a/Sao10K__Fimbulvetr-10.7B-v1/results_2024-07-01T21-25-41.128938.json b/Sao10K__Fimbulvetr-10.7B-v1/results_2024-07-01T21-25-41.128938.json
new file mode 100644
index 0000000000000000000000000000000000000000..ac1867607657477aed9f16ff4e077bfc7f698a2e
--- /dev/null
+++ b/Sao10K__Fimbulvetr-10.7B-v1/results_2024-07-01T21-25-41.128938.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6694881497709619,
+ "acc_stderr,none": 0.004694360968929443,
+ "acc_norm,none": 0.8580959968133838,
+ "acc_norm_stderr,none": 0.0034823849566329064,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.41948210475555,
+ "eqbench_stderr,none": 2.4500037057733617,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Sao10K/Fimbulvetr-10.7B-v1,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "30d93aaba30d8b9eba0ce46fb68a468ea242174a",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719866827.6673388,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Sao10K/Fimbulvetr-10.7B-v1",
+ "model_name_sanitized": "Sao10K__Fimbulvetr-10.7B-v1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 78122.684526781,
+ "end_time": 80443.05835636,
+ "total_evaluation_time_seconds": "2320.3738295789954"
+}
\ No newline at end of file
diff --git a/Sao10K__Fimbulvetr-11B-v2/results_2024-06-28T04-32-22.127106.json b/Sao10K__Fimbulvetr-11B-v2/results_2024-06-28T04-32-22.127106.json
new file mode 100644
index 0000000000000000000000000000000000000000..5cc1e6b52820ce88921e2fb513851de136b0eb9e
--- /dev/null
+++ b/Sao10K__Fimbulvetr-11B-v2/results_2024-06-28T04-32-22.127106.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.68123879705238,
+ "acc_stderr,none": 0.004650438781745276,
+ "acc_norm,none": 0.8660625373431587,
+ "acc_norm_stderr,none": 0.003398890525229556,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 70.00837363646892,
+ "eqbench_stderr,none": 2.230997557081673,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374293,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Sao10K/Fimbulvetr-11B-v2",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "b2dcd534dc3a53ff84e60a53b87816185169be19",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719546844.0477293,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Sao10K/Fimbulvetr-11B-v2",
+ "model_name_sanitized": "Sao10K__Fimbulvetr-11B-v2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 99227.279509843,
+ "end_time": 101532.191916139,
+ "total_evaluation_time_seconds": "2304.912406295989"
+}
\ No newline at end of file
diff --git a/Sao10K__Frostwind-10.7B-v1/results_2024-07-01T20-07-44.450930.json b/Sao10K__Frostwind-10.7B-v1/results_2024-07-01T20-07-44.450930.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb633aef892061658b98dfa7058725cf0a2f8580
--- /dev/null
+++ b/Sao10K__Frostwind-10.7B-v1/results_2024-07-01T20-07-44.450930.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.644991037641904,
+ "acc_stderr,none": 0.004775380866948014,
+ "acc_norm,none": 0.8414658434574785,
+ "acc_norm_stderr,none": 0.0036449467300447647,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 55.726607928145825,
+ "eqbench_stderr,none": 2.887808692501527,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374293,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Sao10K/Frostwind-10.7B-v1,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "5b465f636e1d354718e393e85914865a64840903",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719862245.485066,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Sao10K/Frostwind-10.7B-v1",
+ "model_name_sanitized": "Sao10K__Frostwind-10.7B-v1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 73540.501016482,
+ "end_time": 75766.380380746,
+ "total_evaluation_time_seconds": "2225.8793642640085"
+}
\ No newline at end of file
diff --git a/Sao10K__Solstice-11B-v1/results_2024-07-01T20-47-26.616675.json b/Sao10K__Solstice-11B-v1/results_2024-07-01T20-47-26.616675.json
new file mode 100644
index 0000000000000000000000000000000000000000..00391cd6d03c684c8a8d7ad8d2ad84b100abbe56
--- /dev/null
+++ b/Sao10K__Solstice-11B-v1/results_2024-07-01T20-47-26.616675.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6811392152957578,
+ "acc_stderr,none": 0.004650825168905211,
+ "acc_norm,none": 0.8641704839673372,
+ "acc_norm_stderr,none": 0.003419072480735363,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.23650356231931,
+ "eqbench_stderr,none": 2.2992289426525265,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.584795321637429,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Sao10K/Solstice-11B-v1,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "6db53c612c43e7b34c2f76a662abbd8e19b0c5f6",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719864640.8746834,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Sao10K/Solstice-11B-v1",
+ "model_name_sanitized": "Sao10K__Solstice-11B-v1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 75935.867073192,
+ "end_time": 78148.546093816,
+ "total_evaluation_time_seconds": "2212.6790206239966"
+}
\ No newline at end of file
diff --git a/TeeZee__DarkSapling-7B-v2.0/results_2024-07-02T03-18-06.078821.json b/TeeZee__DarkSapling-7B-v2.0/results_2024-07-02T03-18-06.078821.json
new file mode 100644
index 0000000000000000000000000000000000000000..43ac901cd595b580bbe836c73ad78cdd15e94ecf
--- /dev/null
+++ b/TeeZee__DarkSapling-7B-v2.0/results_2024-07-02T03-18-06.078821.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.637024497112129,
+ "acc_stderr,none": 0.0047987512815608575,
+ "acc_norm,none": 0.8256323441545509,
+ "acc_norm_stderr,none": 0.0037864988567691974,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 62.191078817329824,
+ "eqbench_stderr,none": 2.466355668906657,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=TeeZee/DarkSapling-7B-v2.0,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "0290b688e6aa6620f88ead6903487078ab24f96c",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719888689.565892,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "TeeZee/DarkSapling-7B-v2.0",
+ "model_name_sanitized": "TeeZee__DarkSapling-7B-v2.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 99984.687478048,
+ "end_time": 101588.008165402,
+ "total_evaluation_time_seconds": "1603.320687354004"
+}
\ No newline at end of file
diff --git a/TheDrummer__Moistral-11B-v3/results_2024-07-02T00-08-37.869624.json b/TheDrummer__Moistral-11B-v3/results_2024-07-02T00-08-37.869624.json
new file mode 100644
index 0000000000000000000000000000000000000000..3b8defd3fcf31172cc7c5061965a8bab8bac34e1
--- /dev/null
+++ b/TheDrummer__Moistral-11B-v3/results_2024-07-02T00-08-37.869624.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6810396335391357,
+ "acc_stderr,none": 0.004651211311633819,
+ "acc_norm,none": 0.8664608643696475,
+ "acc_norm_stderr,none": 0.003394613020442028,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 69.75227204221265,
+ "eqbench_stderr,none": 2.2281362165770795,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374293,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=TheDrummer/Moistral-11B-v3,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c061f49aad10acb47e40cf63e7840a906a3ad2c1",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719876604.3651178,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "TheDrummer/Moistral-11B-v3",
+ "model_name_sanitized": "TheDrummer__Moistral-11B-v3",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 87899.523081436,
+ "end_time": 90219.799023715,
+ "total_evaluation_time_seconds": "2320.275942278997"
+}
\ No newline at end of file
diff --git a/Undi95__Borealis-10.7B/results_2024-07-02T01-25-42.423826.json b/Undi95__Borealis-10.7B/results_2024-07-02T01-25-42.423826.json
new file mode 100644
index 0000000000000000000000000000000000000000..683220acbbfe507edf8d8c9f2985a6cc5a56fd02
--- /dev/null
+++ b/Undi95__Borealis-10.7B/results_2024-07-02T01-25-42.423826.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.5981876120294762,
+ "acc_stderr,none": 0.00489262449093723,
+ "acc_norm,none": 0.7957578171678948,
+ "acc_norm_stderr,none": 0.0040232284614061305,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 8.265798945931458,
+ "eqbench_stderr,none": 2.400702056761244,
+ "percent_parseable,none": 44.44444444444444,
+ "percent_parseable_stderr,none": 3.811079669833523,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Undi95/Borealis-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731556864,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "b2c1ed5e4b64dc8499e7c4a5bcc0c7fefed738cb",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719881280.6823523,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Undi95/Borealis-10.7B",
+ "model_name_sanitized": "Undi95__Borealis-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 92575.714752079,
+ "end_time": 94844.35324392,
+ "total_evaluation_time_seconds": "2268.638491841004"
+}
\ No newline at end of file
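For parseable answers, the embedded scorer compares each of the four emotion intensities against the reference and scales the absolute error. A standalone re-statement of that scaling, using the exact constants from the `process_results` string above:

```python
import math

def scaled_difference(d: float) -> float:
    # Piecewise scaling from calculate_score_fullscale: a logistic ramp for
    # differences up to 5, identity above. At d = 5 the logistic branch gives
    # 6.5 / (1 + math.e ** -1.2) ~= 5.0, so the two branches meet smoothly.
    if d == 0:
        return 0.0
    if d <= 5:
        return 6.5 / (1 + math.e ** (-1.2 * (d - 4)))
    return d

for d in (0, 1, 2, 3, 4, 5, 7, 10):
    print(f"d={d:>2}  scaled={scaled_difference(d):.3f}")
```

The question score is then 10 - 0.7477 * (sum of scaled differences), multiplied by 10 to express a percentage, so small errors (d of 1-2 scale to well under 1) are nearly free while errors above roughly 3 points are penalized almost linearly.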
diff --git a/Undi95__Toppy-M-7B/results_2024-06-28T02-28-16.478931.json b/Undi95__Toppy-M-7B/results_2024-06-28T02-28-16.478931.json
new file mode 100644
index 0000000000000000000000000000000000000000..9fa3366947926fd154fdb13641c8e682c73fb7ce
--- /dev/null
+++ b/Undi95__Toppy-M-7B/results_2024-06-28T02-28-16.478931.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6571400119498108,
+ "acc_stderr,none": 0.00473695081061781,
+ "acc_norm,none": 0.8351921927902808,
+ "acc_norm_stderr,none": 0.003702487662126953,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 66.56565114431275,
+ "eqbench_stderr,none": 2.1832557339862837,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=Undi95/Toppy-M-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "5d0c492effbb2e52ea04e5100c6ce02eba48a793",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719540198.0639265,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "Undi95/Toppy-M-7B",
+ "model_name_sanitized": "Undi95__Toppy-M-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 92581.210827571,
+ "end_time": 94086.543624108,
+ "total_evaluation_time_seconds": "1505.3327965369972"
+}
\ No newline at end of file
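All of the result files in this patch share one schema, so they can be compared programmatically. A short reader sketch; the path is the Toppy-M-7B file above, and the ",none" suffix on metric keys is the harness's filter label:

```python
import json
from pathlib import Path

# Print the headline metrics from one results file added in this patch.
path = Path("Undi95__Toppy-M-7B/results_2024-06-28T02-28-16.478931.json")
data = json.loads(path.read_text())

for task, metrics in data["results"].items():
    for key, value in metrics.items():
        if key != "alias" and "stderr" not in key:
            print(f"{task:10s} {key:25s} {value}")
```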
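One schema detail worth noting: `date` is a Unix epoch float, while `start_time` and `end_time` look like monotonic-clock readings (seconds since boot rather than since 1970). The stringly-typed `total_evaluation_time_seconds` is simply their difference, as the Toppy-M-7B values above confirm:

```python
# start_time / end_time from the Undi95__Toppy-M-7B file above; their
# difference reproduces the recorded total_evaluation_time_seconds.
start, end = 92581.210827571, 94086.543624108
print(end - start)  # ~1505.33 s, i.e. about 25 minutes
```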
diff --git a/argilla__CapybaraHermes-2.5-Mistral-7B/results_2024-07-02T04-12-24.235824.json b/argilla__CapybaraHermes-2.5-Mistral-7B/results_2024-07-02T04-12-24.235824.json
new file mode 100644
index 0000000000000000000000000000000000000000..6bb072ee714a1f37d926231cf2ce2da3932cc4b3
--- /dev/null
+++ b/argilla__CapybaraHermes-2.5-Mistral-7B/results_2024-07-02T04-12-24.235824.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.648177653853814,
+ "acc_stderr,none": 0.004765629263643518,
+ "acc_norm,none": 0.8303126867157936,
+ "acc_norm_stderr,none": 0.0037459074237766016,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 66.52043170781761,
+ "eqbench_stderr,none": 2.1880719529261583,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=argilla/CapybaraHermes-2.5-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "d06c86726aadd8dadb92c5b9b9e3ce8ef246c471",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719891965.445841,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "argilla/CapybaraHermes-2.5-Mistral-7B",
+ "model_name_sanitized": "argilla__CapybaraHermes-2.5-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 103260.591614457,
+ "end_time": 104846.165257457,
+ "total_evaluation_time_seconds": "1585.5736430000106"
+}
\ No newline at end of file
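The reported hellaswag `acc_stderr,none` values are consistent with the closed-form standard error of a proportion, which gives a quick sanity check on any of these files. With the CapybaraHermes-2.5-Mistral-7B numbers above:

```python
import math

# Standard error of a mean of 0/1 outcomes: sqrt(p * (1 - p) / n).
p, n = 0.648177653853814, 10042    # acc,none and the effective sample count
print(math.sqrt(p * (1 - p) / n))  # ~0.0047656, matching acc_stderr,none
```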
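The hellaswag `process_docs` string in each config calls a `preprocess` helper defined elsewhere in the harness. The sketch below pairs it with a stand-in cleanup function (my approximation, not the harness's verbatim code) and an invented doc to show the query a model is actually scored on:

```python
import re

def preprocess(text: str) -> str:
    # Stand-in for the harness helper that process_docs assumes: trim
    # whitespace and strip the "[title]"/"[step]"-style markers left in
    # HellaSwag's WikiHow-derived contexts. Approximate, not verbatim.
    text = text.strip().replace(" [title]", ". ")
    text = re.sub(r"\[.*?\]", "", text)
    return text.replace("  ", " ")

doc = {  # illustrative row, not real dataset content
    "activity_label": "Removing ice from car",
    "ctx_a": "How to remove ice from a car [title] Start the engine [step] Grab a plastic scraper,",
    "ctx_b": "the man",
}
ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
print(preprocess(doc["activity_label"] + ": " + ctx))  # bracket markers stripped
```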
diff --git a/athirdpath__NSFW_DPO_vmgb-7b/results_2024-06-28T02-55-12.160237.json b/athirdpath__NSFW_DPO_vmgb-7b/results_2024-06-28T02-55-12.160237.json
new file mode 100644
index 0000000000000000000000000000000000000000..b11f58fd9d4745c4653089b04c59aeae077916fd
--- /dev/null
+++ b/athirdpath__NSFW_DPO_vmgb-7b/results_2024-06-28T02-55-12.160237.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6730730930093607,
+ "acc_stderr,none": 0.0046813160644444095,
+ "acc_norm,none": 0.8536148177653854,
+ "acc_norm_stderr,none": 0.0035276951498235012,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 74.82935321697907,
+ "eqbench_stderr,none": 1.6591997145588517,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=athirdpath/NSFW_DPO_vmgb-7b,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "b667d7810267685cd4f32a8f82044e419c010abe",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719541817.2830012,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "athirdpath/NSFW_DPO_vmgb-7b",
+ "model_name_sanitized": "athirdpath__NSFW_DPO_vmgb-7b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 94200.513127129,
+ "end_time": 95702.225070561,
+ "total_evaluation_time_seconds": "1501.7119434320048"
+}
\ No newline at end of file
diff --git a/backyardai__Fimbulvetr-Holodeck-Erebus-Westlake-10.7B/results_2024-07-02T00-45-44.704724.json b/backyardai__Fimbulvetr-Holodeck-Erebus-Westlake-10.7B/results_2024-07-02T00-45-44.704724.json
new file mode 100644
index 0000000000000000000000000000000000000000..44ea4f1eff6669b6918c9aef362faf427a8901b9
--- /dev/null
+++ b/backyardai__Fimbulvetr-Holodeck-Erebus-Westlake-10.7B/results_2024-07-02T00-45-44.704724.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6837283409679347,
+ "acc_stderr,none": 0.004640699483543287,
+ "acc_norm,none": 0.8599880501892053,
+ "acc_norm_stderr,none": 0.0034629026011361993,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 69.25278425131637,
+ "eqbench_stderr,none": 2.0742624474888856,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=backyardai/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "e47cd0e9dd63b41f649b8da31651689067ac217b",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719878717.9026196,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "backyardai/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B",
+ "model_name_sanitized": "backyardai__Fimbulvetr-Holodeck-Erebus-Westlake-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 90012.968120256,
+ "end_time": 92446.634152182,
+ "total_evaluation_time_seconds": "2433.6660319259972"
+}
\ No newline at end of file
diff --git a/berkeley-nest__Starling-LM-7B-alpha/results_2024-07-02T05-17-10.530751.json b/berkeley-nest__Starling-LM-7B-alpha/results_2024-07-02T05-17-10.530751.json
new file mode 100644
index 0000000000000000000000000000000000000000..af0058cce7f1f861696f868a53ea70451934dc6f
--- /dev/null
+++ b/berkeley-nest__Starling-LM-7B-alpha/results_2024-07-02T05-17-10.530751.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.642899820752838,
+ "acc_stderr,none": 0.004781654610857148,
+ "acc_norm,none": 0.8217486556462856,
+ "acc_norm_stderr,none": 0.0038194200585540956,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 69.78303111392624,
+ "eqbench_stderr,none": 2.1554319334518173,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=berkeley-nest/Starling-LM-7B-alpha,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "1dddf3b95bc1391f6307299eb1c162c194bde9bd",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719895983.4735777,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<|end_of_turn|>",
+ 32000
+ ],
+ "tokenizer_eos_token": [
+ "<|end_of_turn|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 8192,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "berkeley-nest/Starling-LM-7B-alpha",
+ "model_name_sanitized": "berkeley-nest__Starling-LM-7B-alpha",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 107278.552702596,
+ "end_time": 108732.460166266,
+ "total_evaluation_time_seconds": "1453.9074636699952"
+}
\ No newline at end of file
diff --git a/cgato__Thespis-Mistral-7b-v0.6/results_2024-07-02T03-55-19.886617.json b/cgato__Thespis-Mistral-7b-v0.6/results_2024-07-02T03-55-19.886617.json
new file mode 100644
index 0000000000000000000000000000000000000000..cac92b3a88dd014b695045396135f8d569499615
--- /dev/null
+++ b/cgato__Thespis-Mistral-7b-v0.6/results_2024-07-02T03-55-19.886617.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6221868153754232,
+ "acc_stderr,none": 0.004838496966823936,
+ "acc_norm,none": 0.818263294164509,
+ "acc_norm_stderr,none": 0.003848392656939309,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 29.12698576180375,
+ "eqbench_stderr,none": 3.2541125218508933,
+ "percent_parseable,none": 79.53216374269006,
+ "percent_parseable_stderr,none": 3.0944459778533218,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=cgato/Thespis-Mistral-7b-v0.6,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "8f02867239bb861de358813db56bd68440534553",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719891027.5687327,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "cgato/Thespis-Mistral-7b-v0.6",
+ "model_name_sanitized": "cgato__Thespis-Mistral-7b-v0.6",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 102322.69548256,
+ "end_time": 103821.816040654,
+ "total_evaluation_time_seconds": "1499.1205580940004"
+}
\ No newline at end of file
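Annotation: Thespis-Mistral-7b-v0.6 is the outlier in this batch. Only 79.53% of its EQ-Bench completions parse, versus 100.0% for most other models here, and the serialized `calculate_score_fullscale` above scores any completion that does not yield exactly four matching emotion/score pairs as zero on both metrics, which is why its `eqbench,none` of 29.13 sits so far below the rest. The parsing step, lifted from that function:

```python
# The answer-parsing step from the serialized process_results above:
# a completion counts as parseable only if the regex yields exactly four
# "Emotion: score" pairs (which must then match the reference emotions).
import re

def parse_answer(completion: str) -> dict:
    return dict(re.findall(r"(\w+):\s+(\d+)", completion))

good = parse_answer("Surprise: 7\nConfusion: 4\nHurt: 6\nRelief: 0")
assert len(good) == 4  # parseable
bad = parse_answer("The dominant emotion here is surprise.")
assert len(bad) != 4   # scored as eqbench = 0, percent_parseable = 0
```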
diff --git a/chargoddard__loyal-piano-m7/.ipynb_checkpoints/results_2024-07-02T04-51-42.336742-checkpoint.json b/chargoddard__loyal-piano-m7/.ipynb_checkpoints/results_2024-07-02T04-51-42.336742-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..1fb2fc2e0b4c079327961faa4da3dbf26a043d82
--- /dev/null
+++ b/chargoddard__loyal-piano-m7/.ipynb_checkpoints/results_2024-07-02T04-51-42.336742-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6439952200756821,
+ "acc_stderr,none": 0.004778380758851119,
+ "acc_norm,none": 0.8328022306313483,
+ "acc_norm_stderr,none": 0.003723897305645462,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 72.14962256589271,
+ "eqbench_stderr,none": 2.006126552035965,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=chargoddard/loyal-piano-m7,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "9ad4f1ce2cf75c3ab54af6f9872356cb199b330e",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719894335.7498412,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "chargoddard/loyal-piano-m7",
+ "model_name_sanitized": "chargoddard__loyal-piano-m7",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 105630.829033957,
+ "end_time": 107204.26616981,
+ "total_evaluation_time_seconds": "1573.4371358530043"
+}
\ No newline at end of file
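Annotation: the scorer serialized in every file maps each per-emotion difference d = |user − reference| to a penalty: 0 when d = 0, the S-shaped 6.5 / (1 + e^(−1.2·(d − 4))) when d ≤ 5, and d itself beyond that, so near-misses cost little while large errors grow linearly. Extracted from `calculate_score_fullscale` into standalone form:

```python
# Per-emotion penalty and final percentage score, extracted from the
# serialized calculate_score_fullscale above.
import math

def scaled_difference(d: float) -> float:
    if d == 0:
        return 0.0
    if d <= 5:
        # S-shaped scaling: 6.5 / (1 + e^(-1.2 * (d - 4)))
        return 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))
    return d

def question_score(diffs: list[float]) -> float:
    tally = sum(scaled_difference(d) for d in diffs)
    return (10 - tally * 0.7477) * 10  # adjust_const, then scale to percent

print(question_score([0, 0, 0, 0]))    # perfect answer -> 100.0
print(round(scaled_difference(1), 3))  # off by one point -> ~0.173 penalty
```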
diff --git a/chargoddard__loyal-piano-m7/results_2024-07-02T04-51-42.336742.json b/chargoddard__loyal-piano-m7/results_2024-07-02T04-51-42.336742.json
new file mode 100644
index 0000000000000000000000000000000000000000..1fb2fc2e0b4c079327961faa4da3dbf26a043d82
--- /dev/null
+++ b/chargoddard__loyal-piano-m7/results_2024-07-02T04-51-42.336742.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6439952200756821,
+ "acc_stderr,none": 0.004778380758851119,
+ "acc_norm,none": 0.8328022306313483,
+ "acc_norm_stderr,none": 0.003723897305645462,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 72.14962256589271,
+ "eqbench_stderr,none": 2.006126552035965,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=chargoddard/loyal-piano-m7,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "9ad4f1ce2cf75c3ab54af6f9872356cb199b330e",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719894335.7498412,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "chargoddard/loyal-piano-m7",
+ "model_name_sanitized": "chargoddard__loyal-piano-m7",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 105630.829033957,
+ "end_time": 107204.26616981,
+ "total_evaluation_time_seconds": "1573.4371358530043"
+}
\ No newline at end of file
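Annotation: loyal-piano-m7's HellaSwag spread above (acc 0.644 vs acc_norm 0.833) is typical of `multiple_choice` tasks: plain acc picks the ending with the highest total log-likelihood, while acc_norm first divides by the ending's byte length so longer endings are not penalized. A sketch of the two selection rules, assuming the harness's byte-length normalization convention for acc_norm:

```python
# Sketch of acc vs acc_norm selection for a multiple_choice task
# (assumption: acc_norm normalizes loglikelihood by the continuation's
# UTF-8 byte length, per the harness convention).
def pick_ending(loglikelihoods, endings, normalize=False):
    scores = [
        ll / len(e.encode("utf-8")) if normalize else ll
        for ll, e in zip(loglikelihoods, endings)
    ]
    return max(range(len(scores)), key=scores.__getitem__)

lls = [-5.0, -6.0]
endings = ["short", "a considerably longer candidate ending here"]
print(pick_ending(lls, endings))                  # 0: raw total favors the short ending
print(pick_ending(lls, endings, normalize=True))  # 1: per-byte score favors the long one
```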
diff --git a/cognitivecomputations__dolphin-2.2.1-mistral-7b/.ipynb_checkpoints/results_2024-07-02T06-02-40.816103-checkpoint.json b/cognitivecomputations__dolphin-2.2.1-mistral-7b/.ipynb_checkpoints/results_2024-07-02T06-02-40.816103-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..b0fda1c829dc1c6f8a83b650b9473c86e7f44c6b
--- /dev/null
+++ b/cognitivecomputations__dolphin-2.2.1-mistral-7b/.ipynb_checkpoints/results_2024-07-02T06-02-40.816103-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6285600477992431,
+ "acc_stderr,none": 0.004822022254886004,
+ "acc_norm,none": 0.8146783509261103,
+ "acc_norm_stderr,none": 0.003877641746375665,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 67.32561567936949,
+ "eqbench_stderr,none": 2.414051136188407,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=cognitivecomputations/dolphin-2.2.1-mistral-7b,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "20f78ab87598cab137c8ce00855464cae403a3fd",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719898636.197451,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "cognitivecomputations/dolphin-2.2.1-mistral-7b",
+ "model_name_sanitized": "cognitivecomputations__dolphin-2.2.1-mistral-7b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 109931.241971652,
+ "end_time": 111462.745517669,
+ "total_evaluation_time_seconds": "1531.5035460170038"
+}
\ No newline at end of file
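Annotation: the reported standard errors follow from the per-item 0/1 scores alone. Using dolphin-2.2.1's HellaSwag accuracy over the 10042 items reproduces `acc_stderr,none` above, assuming the usual sample standard error (an n − 1 denominator in the variance):

```python
# Sanity check on acc_stderr,none for dolphin-2.2.1 above:
# sample standard error of a 0/1 mean, sqrt(p * (1 - p) / (n - 1)).
import math

p, n = 0.6285600477992431, 10042
print(math.sqrt(p * (1 - p) / (n - 1)))  # ~0.0048220, vs reported 0.004822022...
```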
diff --git a/cognitivecomputations__dolphin-2.2.1-mistral-7b/results_2024-07-02T06-02-40.816103.json b/cognitivecomputations__dolphin-2.2.1-mistral-7b/results_2024-07-02T06-02-40.816103.json
new file mode 100644
index 0000000000000000000000000000000000000000..b0fda1c829dc1c6f8a83b650b9473c86e7f44c6b
--- /dev/null
+++ b/cognitivecomputations__dolphin-2.2.1-mistral-7b/results_2024-07-02T06-02-40.816103.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6285600477992431,
+ "acc_stderr,none": 0.004822022254886004,
+ "acc_norm,none": 0.8146783509261103,
+ "acc_norm_stderr,none": 0.003877641746375665,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 67.32561567936949,
+ "eqbench_stderr,none": 2.414051136188407,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=cognitivecomputations/dolphin-2.2.1-mistral-7b,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "20f78ab87598cab137c8ce00855464cae403a3fd",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719898636.197451,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "cognitivecomputations/dolphin-2.2.1-mistral-7b",
+ "model_name_sanitized": "cognitivecomputations__dolphin-2.2.1-mistral-7b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 109931.241971652,
+ "end_time": 111462.745517669,
+ "total_evaluation_time_seconds": "1531.5035460170038"
+}
\ No newline at end of file
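Annotation: the `process_docs` string in each file shows how HellaSwag items are assembled: `ctx_a` plus a capitalized `ctx_b` form the context, prefixed with the activity label, and each ending becomes a choice. The `preprocess` helper is not included in the serialized snippet; the stub below is a stand-in mirroring the harness's published hellaswag utils (an assumption: strip `[...]` artifacts from the WikiHow portion and collapse double spaces), so treat it as an approximation:

```python
# Standalone version of the serialized process_docs above; preprocess is
# a stand-in for the harness helper, which is defined elsewhere.
import re

def preprocess(text: str) -> str:
    text = text.strip()
    text = text.replace(" [title]", ". ")
    text = re.sub(r"\[.*?\]", "", text)
    return text.replace("  ", " ")

def process_doc(doc: dict) -> dict:
    ctx = doc["ctx_a"] + " " + doc["ctx_b"].capitalize()
    return {
        "query": preprocess(doc["activity_label"] + ": " + ctx),
        "choices": [preprocess(e) for e in doc["endings"]],
        "gold": int(doc["label"]),
    }
```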
diff --git a/cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser/.ipynb_checkpoints/results_2024-07-02T04-21-44.877903-checkpoint.json b/cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser/.ipynb_checkpoints/results_2024-07-02T04-21-44.877903-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e423c6ef4c987cf7128811db52f55522f465a73
--- /dev/null
+++ b/cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser/.ipynb_checkpoints/results_2024-07-02T04-21-44.877903-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6483768173670583,
+ "acc_stderr,none": 0.004765012078929417,
+ "acc_norm,none": 0.8360884285998805,
+ "acc_norm_stderr,none": 0.003694387361177522,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 71.57020405475217,
+ "eqbench_stderr,none": 2.0257691517275624,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374273,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser,trust_remote_code=True",
+ "model_num_parameters": 7241740288,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "95999a69464ffcf4e25854208271000dfc255696",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719892564.0480487,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<|im_end|>",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser",
+ "model_name_sanitized": "cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 103859.073714916,
+ "end_time": 105406.807341044,
+ "total_evaluation_time_seconds": "1547.7336261279997"
+}
\ No newline at end of file
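Annotation: the scorer's comment states that `adjust_const = 0.7477` is chosen so that answering randomly produces a score of zero; the arithmetic behind that claim:

```python
# The serialized scorer computes (10 - tally * 0.7477) * 10, so a raw
# score of zero corresponds to a per-question difference tally of
# 10 / 0.7477, i.e. the expected tally under random answering.
adjust_const = 0.7477
zero_tally = 10 / adjust_const
print(round(zero_tally, 2))      # 13.37 total across the four emotions
print(round(zero_tally / 4, 2))  # ~3.34 average scaled penalty per emotion
```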
diff --git a/cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser/results_2024-07-02T04-21-44.877903.json b/cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser/results_2024-07-02T04-21-44.877903.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e423c6ef4c987cf7128811db52f55522f465a73
--- /dev/null
+++ b/cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser/results_2024-07-02T04-21-44.877903.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6483768173670583,
+ "acc_stderr,none": 0.004765012078929417,
+ "acc_norm,none": 0.8360884285998805,
+ "acc_norm_stderr,none": 0.003694387361177522,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 71.57020405475217,
+ "eqbench_stderr,none": 2.0257691517275624,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374273,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser,trust_remote_code=True",
+ "model_num_parameters": 7241740288,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "95999a69464ffcf4e25854208271000dfc255696",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719892564.0480487,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<|im_end|>",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "cognitivecomputations/dolphin-2.6-mistral-7b-dpo-laser",
+ "model_name_sanitized": "cognitivecomputations__dolphin-2.6-mistral-7b-dpo-laser",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 103859.073714916,
+ "end_time": 105406.807341044,
+ "total_evaluation_time_seconds": "1547.7336261279997"
+}
\ No newline at end of file
diff --git a/froggeric__WestLake-10.7B-v2/results_2024-07-01T22-07-10.044094.json b/froggeric__WestLake-10.7B-v2/results_2024-07-01T22-07-10.044094.json
new file mode 100644
index 0000000000000000000000000000000000000000..37beb8d423da238b3dc94fc14aced0023b1c3590
--- /dev/null
+++ b/froggeric__WestLake-10.7B-v2/results_2024-07-01T22-07-10.044094.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6982672774347739,
+ "acc_stderr,none": 0.004580718115992501,
+ "acc_norm,none": 0.8673571001792472,
+ "acc_norm_stderr,none": 0.0033849518032134326,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 73.35343055205183,
+ "eqbench_stderr,none": 2.018556635561309,
+ "percent_parseable,none": 95.32163742690058,
+ "percent_parseable_stderr,none": 1.619638995623559,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=froggeric/WestLake-10.7B-v2,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "de1f0f286ef6d5a6e10627ac05f8cfb9baaa36a5",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719869203.7996294,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "froggeric/WestLake-10.7B-v2",
+ "model_name_sanitized": "froggeric__WestLake-10.7B-v2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 80498.777827703,
+ "end_time": 82931.973511543,
+ "total_evaluation_time_seconds": "2433.195683839993"
+}
\ No newline at end of file
diff --git a/head-empty-ai__Mytho-Lemon-11B/results_2024-07-02T03-27-57.446245.json b/head-empty-ai__Mytho-Lemon-11B/results_2024-07-02T03-27-57.446245.json
new file mode 100644
index 0000000000000000000000000000000000000000..35a78370342e6c03b96867b92f03f887f7a31e72
--- /dev/null
+++ b/head-empty-ai__Mytho-Lemon-11B/results_2024-07-02T03-27-57.446245.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6298546106353317,
+ "acc_stderr,none": 0.004818566366066908,
+ "acc_norm,none": 0.7977494523003386,
+ "acc_norm_stderr,none": 0.0040085714314836915,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 60.025207476634726,
+ "eqbench_stderr,none": 2.7274520011253087,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=head-empty-ai/Mytho-Lemon-11B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "793a2d1bf7fe26da4824e13b7992465f2f4765d6",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 8
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719888600.6598475,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "head-empty-ai/Mytho-Lemon-11B",
+ "model_name_sanitized": "head-empty-ai__Mytho-Lemon-11B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 99895.742790118,
+ "end_time": 102179.375651272,
+ "total_evaluation_time_seconds": "2283.632861153994"
+}
\ No newline at end of file
diff --git a/jondurbin__airoboros-m-7b-3.1.2/results_2024-06-27T21-27-37.734965.json b/jondurbin__airoboros-m-7b-3.1.2/results_2024-06-27T21-27-37.734965.json
new file mode 100644
index 0000000000000000000000000000000000000000..b4db605829ba4cdf02dda95465ecd3662dc888cf
--- /dev/null
+++ b/jondurbin__airoboros-m-7b-3.1.2/results_2024-06-27T21-27-37.734965.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6262696673969329,
+ "acc_stderr,none": 0.0048280457747349,
+ "acc_norm,none": 0.8133837880900219,
+ "acc_norm_stderr,none": 0.0038880689432920544,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 38.51541437922841,
+ "eqbench_stderr,none": 3.511567923871361,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=jondurbin/airoboros-m-7b-3.1.2,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "e9a7f0271fa442d65bf6be87feeb3f4de2f5760e",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719522153.290284,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "jondurbin/airoboros-m-7b-3.1.2",
+ "model_name_sanitized": "jondurbin__airoboros-m-7b-3.1.2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 74536.468290869,
+ "end_time": 76047.79978283,
+ "total_evaluation_time_seconds": "1511.3314919609984"
+}
\ No newline at end of file
diff --git a/jondurbin__cinematika-7b-v0.1/results_2024-06-27T23-16-51.732979.json b/jondurbin__cinematika-7b-v0.1/results_2024-06-27T23-16-51.732979.json
new file mode 100644
index 0000000000000000000000000000000000000000..4f69787668f3879ed10277093f61a4df3c443df3
--- /dev/null
+++ b/jondurbin__cinematika-7b-v0.1/results_2024-06-27T23-16-51.732979.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6138219478191596,
+ "acc_stderr,none": 0.004858771963468838,
+ "acc_norm,none": 0.8031268671579367,
+ "acc_norm_stderr,none": 0.00396822985262125,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 44.84948931109151,
+ "eqbench_stderr,none": 3.1571076496385277,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=jondurbin/cinematika-7b-v0.1,trust_remote_code=True",
+ "model_num_parameters": 7241756672,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "6df1846af7de7ab8e2201ad87071ed661e3b0de2",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719528705.1154015,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "[PAD]",
+ 32000
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "jondurbin/cinematika-7b-v0.1",
+ "model_name_sanitized": "jondurbin__cinematika-7b-v0.1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 81088.287076101,
+ "end_time": 82601.797810444,
+ "total_evaluation_time_seconds": "1513.510734342999"
+}
\ No newline at end of file
diff --git a/kyujinpy__SOLAR-Platypus-10.7B-v2/results_2024-07-02T00-03-58.332402.json b/kyujinpy__SOLAR-Platypus-10.7B-v2/results_2024-07-02T00-03-58.332402.json
new file mode 100644
index 0000000000000000000000000000000000000000..5964213730e122c75ec6f8397a90c43763a5cda3
--- /dev/null
+++ b/kyujinpy__SOLAR-Platypus-10.7B-v2/results_2024-07-02T00-03-58.332402.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6271659032065325,
+ "acc_stderr,none": 0.004825702533920419,
+ "acc_norm,none": 0.8204540928101972,
+ "acc_norm_stderr,none": 0.0038302437643328983,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 25.109901450892778,
+ "eqbench_stderr,none": 2.902801491896888,
+ "percent_parseable,none": 45.6140350877193,
+ "percent_parseable_stderr,none": 3.820042586602962,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=kyujinpy/SOLAR-Platypus-10.7B-v2,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "2a08546624dd10d139dd030f0ce7cf2199dd7a6a",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719876463.3489583,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "kyujinpy/SOLAR-Platypus-10.7B-v2",
+ "model_name_sanitized": "kyujinpy__SOLAR-Platypus-10.7B-v2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 87758.502834899,
+ "end_time": 89940.261818391,
+ "total_evaluation_time_seconds": "2181.7589834919927"
+}
\ No newline at end of file
diff --git a/maywell__Synatra-7B-v0.3-RP/results_2024-07-02T06-46-15.142587.json b/maywell__Synatra-7B-v0.3-RP/results_2024-07-02T06-46-15.142587.json
new file mode 100644
index 0000000000000000000000000000000000000000..ff756a6898cbd2001245ff45bbb59ede7014f95f
--- /dev/null
+++ b/maywell__Synatra-7B-v0.3-RP/results_2024-07-02T06-46-15.142587.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6164110734913364,
+ "acc_stderr,none": 0.004852658876775384,
+ "acc_norm,none": 0.8046205935072694,
+ "acc_norm_stderr,none": 0.0039568217050184535,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 54.93035121530972,
+ "eqbench_stderr,none": 2.672374443919001,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=maywell/Synatra-7B-v0.3-RP,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "a994747e68972f9018cd454730174211f9e46736",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719901236.0567749,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "maywell/Synatra-7B-v0.3-RP",
+ "model_name_sanitized": "maywell__Synatra-7B-v0.3-RP",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 112531.153671088,
+ "end_time": 114077.071988352,
+ "total_evaluation_time_seconds": "1545.9183172640041"
+}
\ No newline at end of file
diff --git a/migtissera__Synthia-7B-v3.0/.ipynb_checkpoints/results_2024-06-27T22-50-03.654626-checkpoint.json b/migtissera__Synthia-7B-v3.0/.ipynb_checkpoints/results_2024-06-27T22-50-03.654626-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..f9d2946978ec656186530adaa4780ef8b6880730
--- /dev/null
+++ b/migtissera__Synthia-7B-v3.0/.ipynb_checkpoints/results_2024-06-27T22-50-03.654626-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6188010356502689,
+ "acc_stderr,none": 0.004846886929763468,
+ "acc_norm,none": 0.8173670583549094,
+ "acc_norm_stderr,none": 0.0038557568514416335,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 15.026068685500109,
+ "eqbench_stderr,none": 2.8920476087351132,
+ "percent_parseable,none": 94.73684210526316,
+ "percent_parseable_stderr,none": 1.7126088775157098,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=migtissera/Synthia-7B-v3.0,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "93c2e8b8055b42779f2b68059ebe38af6f2789c4",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719527019.219691,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "migtissera/Synthia-7B-v3.0",
+ "model_name_sanitized": "migtissera__Synthia-7B-v3.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 79402.540235241,
+ "end_time": 80993.719447117,
+ "total_evaluation_time_seconds": "1591.1792118759913"
+}
\ No newline at end of file
diff --git a/migtissera__Synthia-7B-v3.0/results_2024-06-27T22-50-03.654626.json b/migtissera__Synthia-7B-v3.0/results_2024-06-27T22-50-03.654626.json
new file mode 100644
index 0000000000000000000000000000000000000000..f9d2946978ec656186530adaa4780ef8b6880730
--- /dev/null
+++ b/migtissera__Synthia-7B-v3.0/results_2024-06-27T22-50-03.654626.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6188010356502689,
+ "acc_stderr,none": 0.004846886929763468,
+ "acc_norm,none": 0.8173670583549094,
+ "acc_norm_stderr,none": 0.0038557568514416335,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 15.026068685500109,
+ "eqbench_stderr,none": 2.8920476087351132,
+ "percent_parseable,none": 94.73684210526316,
+ "percent_parseable_stderr,none": 1.7126088775157098,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=migtissera/Synthia-7B-v3.0,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "93c2e8b8055b42779f2b68059ebe38af6f2789c4",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719527019.219691,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "migtissera/Synthia-7B-v3.0",
+ "model_name_sanitized": "migtissera__Synthia-7B-v3.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 79402.540235241,
+ "end_time": 80993.719447117,
+ "total_evaluation_time_seconds": "1591.1792118759913"
+}
\ No newline at end of file
diff --git a/migtissera__Tess-10.7B-v1.5b/.ipynb_checkpoints/results_2024-07-01T21-27-23.093748-checkpoint.json b/migtissera__Tess-10.7B-v1.5b/.ipynb_checkpoints/results_2024-07-01T21-27-23.093748-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..b2bdb33c81844b184b2f104c7ad6121dec8a2a7d
--- /dev/null
+++ b/migtissera__Tess-10.7B-v1.5b/.ipynb_checkpoints/results_2024-07-01T21-27-23.093748-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6458872734515037,
+ "acc_stderr,none": 0.004772661659628842,
+ "acc_norm,none": 0.8381796454889464,
+ "acc_norm_stderr,none": 0.0036753325906809194,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 61.834945599309265,
+ "eqbench_stderr,none": 2.695505491187219,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374285,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=migtissera/Tess-10.7B-v1.5b,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "67fd613302b31ff4a60e320685813584851a214f",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719867017.0855958,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "migtissera/Tess-10.7B-v1.5b",
+ "model_name_sanitized": "migtissera__Tess-10.7B-v1.5b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 78312.095615139,
+ "end_time": 80545.023168157,
+ "total_evaluation_time_seconds": "2232.927553018002"
+}
\ No newline at end of file
diff --git a/migtissera__Tess-10.7B-v1.5b/results_2024-07-01T21-27-23.093748.json b/migtissera__Tess-10.7B-v1.5b/results_2024-07-01T21-27-23.093748.json
new file mode 100644
index 0000000000000000000000000000000000000000..b2bdb33c81844b184b2f104c7ad6121dec8a2a7d
--- /dev/null
+++ b/migtissera__Tess-10.7B-v1.5b/results_2024-07-01T21-27-23.093748.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6458872734515037,
+ "acc_stderr,none": 0.004772661659628842,
+ "acc_norm,none": 0.8381796454889464,
+ "acc_norm_stderr,none": 0.0036753325906809194,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 61.834945599309265,
+ "eqbench_stderr,none": 2.695505491187219,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374285,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=migtissera/Tess-10.7B-v1.5b,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "67fd613302b31ff4a60e320685813584851a214f",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719867017.0855958,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "migtissera/Tess-10.7B-v1.5b",
+ "model_name_sanitized": "migtissera__Tess-10.7B-v1.5b",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 78312.095615139,
+ "end_time": 80545.023168157,
+ "total_evaluation_time_seconds": "2232.927553018002"
+}
\ No newline at end of file
diff --git a/mistralai__Mistral-7B-Instruct-v0.1/.ipynb_checkpoints/results_2024-07-02T04-41-17.557455-checkpoint.json b/mistralai__Mistral-7B-Instruct-v0.1/.ipynb_checkpoints/results_2024-07-02T04-41-17.557455-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..471f717eef234e491b0bbc2638718e82d3c284c5
--- /dev/null
+++ b/mistralai__Mistral-7B-Instruct-v0.1/.ipynb_checkpoints/results_2024-07-02T04-41-17.557455-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.5630352519418442,
+ "acc_stderr,none": 0.004949969363017642,
+ "acc_norm,none": 0.7466640111531567,
+ "acc_norm_stderr,none": 0.004340328204135102,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 46.82017378717466,
+ "eqbench_stderr,none": 3.211082755225061,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.1,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "86370fc1f5e0aa51b50dcdf6eada80697b570099",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719893767.2215395,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "mistralai/Mistral-7B-Instruct-v0.1",
+ "model_name_sanitized": "mistralai__Mistral-7B-Instruct-v0.1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 105062.200308793,
+ "end_time": 106579.486870813,
+ "total_evaluation_time_seconds": "1517.2865620199882"
+}
\ No newline at end of file
diff --git a/mistralai__Mistral-7B-Instruct-v0.1/results_2024-07-02T04-41-17.557455.json b/mistralai__Mistral-7B-Instruct-v0.1/results_2024-07-02T04-41-17.557455.json
new file mode 100644
index 0000000000000000000000000000000000000000..471f717eef234e491b0bbc2638718e82d3c284c5
--- /dev/null
+++ b/mistralai__Mistral-7B-Instruct-v0.1/results_2024-07-02T04-41-17.557455.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.5630352519418442,
+ "acc_stderr,none": 0.004949969363017642,
+ "acc_norm,none": 0.7466640111531567,
+ "acc_norm_stderr,none": 0.004340328204135102,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 46.82017378717466,
+ "eqbench_stderr,none": 3.211082755225061,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.1,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "86370fc1f5e0aa51b50dcdf6eada80697b570099",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719893767.2215395,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "mistralai/Mistral-7B-Instruct-v0.1",
+ "model_name_sanitized": "mistralai__Mistral-7B-Instruct-v0.1",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 105062.200308793,
+ "end_time": 106579.486870813,
+ "total_evaluation_time_seconds": "1517.2865620199882"
+}
\ No newline at end of file
diff --git a/mistralai__Mistral-7B-Instruct-v0.2/.ipynb_checkpoints/results_2024-07-02T05-07-32.922766-checkpoint.json b/mistralai__Mistral-7B-Instruct-v0.2/.ipynb_checkpoints/results_2024-07-02T05-07-32.922766-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..d4a2384840cd9036d559db512115f6fd3b63fd53
--- /dev/null
+++ b/mistralai__Mistral-7B-Instruct-v0.2/.ipynb_checkpoints/results_2024-07-02T05-07-32.922766-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6609241187014538,
+ "acc_stderr,none": 0.004724281487819372,
+ "acc_norm,none": 0.8365863373829915,
+ "acc_norm_stderr,none": 0.0036898701424130766,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.49565100216773,
+ "eqbench_stderr,none": 2.53483923149953,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374286,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.2,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "99259002b41e116d28ccb2d04a9fbe22baed0c7f",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719895327.2738423,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "mistralai/Mistral-7B-Instruct-v0.2",
+ "model_name_sanitized": "mistralai__Mistral-7B-Instruct-v0.2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 106622.397945177,
+ "end_time": 108154.852197086,
+ "total_evaluation_time_seconds": "1532.4542519090028"
+}
\ No newline at end of file
diff --git a/mistralai__Mistral-7B-Instruct-v0.2/results_2024-07-02T05-07-32.922766.json b/mistralai__Mistral-7B-Instruct-v0.2/results_2024-07-02T05-07-32.922766.json
new file mode 100644
index 0000000000000000000000000000000000000000..d4a2384840cd9036d559db512115f6fd3b63fd53
--- /dev/null
+++ b/mistralai__Mistral-7B-Instruct-v0.2/results_2024-07-02T05-07-32.922766.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6609241187014538,
+ "acc_stderr,none": 0.004724281487819372,
+ "acc_norm,none": 0.8365863373829915,
+ "acc_norm_stderr,none": 0.0036898701424130766,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.49565100216773,
+ "eqbench_stderr,none": 2.53483923149953,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374286,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=mistralai/Mistral-7B-Instruct-v0.2,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "99259002b41e116d28ccb2d04a9fbe22baed0c7f",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719895327.2738423,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "mistralai/Mistral-7B-Instruct-v0.2",
+ "model_name_sanitized": "mistralai__Mistral-7B-Instruct-v0.2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 106622.397945177,
+ "end_time": 108154.852197086,
+ "total_evaluation_time_seconds": "1532.4542519090028"
+}
\ No newline at end of file
diff --git a/mlabonne__NeuralBeagle14-7B/.ipynb_checkpoints/results_2024-06-28T00-10-47.687175-checkpoint.json b/mlabonne__NeuralBeagle14-7B/.ipynb_checkpoints/results_2024-06-28T00-10-47.687175-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..f34b29d3931a281bc379801ab25076bfc1b6c860
--- /dev/null
+++ b/mlabonne__NeuralBeagle14-7B/.ipynb_checkpoints/results_2024-06-28T00-10-47.687175-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.7003584943238399,
+ "acc_stderr,none": 0.004571647137441099,
+ "acc_norm,none": 0.8645688109938259,
+ "acc_norm_stderr,none": 0.003414842236516961,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 74.20803234078544,
+ "eqbench_stderr,none": 1.9057062958788094,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374271,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=mlabonne/NeuralBeagle14-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "1567ad618a0998139654cb355738bb9bc018ca64",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719531860.3285184,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "mlabonne/NeuralBeagle14-7B",
+ "model_name_sanitized": "mlabonne__NeuralBeagle14-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 84243.603381348,
+ "end_time": 85837.751968409,
+ "total_evaluation_time_seconds": "1594.1485870609904"
+}
\ No newline at end of file
diff --git a/mlabonne__NeuralBeagle14-7B/results_2024-06-28T00-10-47.687175.json b/mlabonne__NeuralBeagle14-7B/results_2024-06-28T00-10-47.687175.json
new file mode 100644
index 0000000000000000000000000000000000000000..f34b29d3931a281bc379801ab25076bfc1b6c860
--- /dev/null
+++ b/mlabonne__NeuralBeagle14-7B/results_2024-06-28T00-10-47.687175.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.7003584943238399,
+ "acc_stderr,none": 0.004571647137441099,
+ "acc_norm,none": 0.8645688109938259,
+ "acc_norm_stderr,none": 0.003414842236516961,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 74.20803234078544,
+ "eqbench_stderr,none": 1.9057062958788094,
+ "percent_parseable,none": 99.41520467836257,
+ "percent_parseable_stderr,none": 0.5847953216374271,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=mlabonne/NeuralBeagle14-7B,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "1567ad618a0998139654cb355738bb9bc018ca64",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719531860.3285184,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "mlabonne/NeuralBeagle14-7B",
+ "model_name_sanitized": "mlabonne__NeuralBeagle14-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 84243.603381348,
+ "end_time": 85837.751968409,
+ "total_evaluation_time_seconds": "1594.1485870609904"
+}
\ No newline at end of file
diff --git a/rwitz__go-bruins/.ipynb_checkpoints/results_2024-06-27T22-21-09.060416-checkpoint.json b/rwitz__go-bruins/.ipynb_checkpoints/results_2024-06-27T22-21-09.060416-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6671b8c92304d18eb9e0ce3b9a6ffd2f7fb19c7
--- /dev/null
+++ b/rwitz__go-bruins/.ipynb_checkpoints/results_2024-06-27T22-21-09.060416-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6664011153156741,
+ "acc_stderr,none": 0.0047053471376996584,
+ "acc_norm,none": 0.8492332204740092,
+ "acc_norm_stderr,none": 0.0035709011883580865,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 73.61594324522352,
+ "eqbench_stderr,none": 1.8000447804156592,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=rwitz/go-bruins,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "27b510cc158d83cad4f4df4f5cee65353647e080",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719525369.5356786,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "rwitz/go-bruins",
+ "model_name_sanitized": "rwitz__go-bruins",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 77752.802678043,
+ "end_time": 79259.12526582,
+ "total_evaluation_time_seconds": "1506.322587777002"
+}
\ No newline at end of file
diff --git a/rwitz__go-bruins/results_2024-06-27T22-21-09.060416.json b/rwitz__go-bruins/results_2024-06-27T22-21-09.060416.json
new file mode 100644
index 0000000000000000000000000000000000000000..c6671b8c92304d18eb9e0ce3b9a6ffd2f7fb19c7
--- /dev/null
+++ b/rwitz__go-bruins/results_2024-06-27T22-21-09.060416.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6664011153156741,
+ "acc_stderr,none": 0.0047053471376996584,
+ "acc_norm,none": 0.8492332204740092,
+ "acc_norm_stderr,none": 0.0035709011883580865,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 73.61594324522352,
+ "eqbench_stderr,none": 1.8000447804156592,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=rwitz/go-bruins,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "27b510cc158d83cad4f4df4f5cee65353647e080",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719525369.5356786,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "rwitz/go-bruins",
+ "model_name_sanitized": "rwitz__go-bruins",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 77752.802678043,
+ "end_time": 79259.12526582,
+ "total_evaluation_time_seconds": "1506.322587777002"
+}
\ No newline at end of file
diff --git a/saishf__Fimbulvetr-Kuro-Lotus-10.7B/.ipynb_checkpoints/results_2024-07-01T23-24-05.421876-checkpoint.json b/saishf__Fimbulvetr-Kuro-Lotus-10.7B/.ipynb_checkpoints/results_2024-07-01T23-24-05.421876-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..b99a1af90e30097920f2b03b8592fb5f789cb67c
--- /dev/null
+++ b/saishf__Fimbulvetr-Kuro-Lotus-10.7B/.ipynb_checkpoints/results_2024-07-01T23-24-05.421876-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.676458872734515,
+ "acc_stderr,none": 0.0046687106891924075,
+ "acc_norm,none": 0.86247759410476,
+ "acc_norm_stderr,none": 0.0034369416417827595,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.8510580469694,
+ "eqbench_stderr,none": 2.304299203691162,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=saishf/Fimbulvetr-Kuro-Lotus-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "ec1288fd8c06ac408a2a7e503ea62ac300e474e1",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719874027.8025355,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "saishf/Fimbulvetr-Kuro-Lotus-10.7B",
+ "model_name_sanitized": "saishf__Fimbulvetr-Kuro-Lotus-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 85322.658459155,
+ "end_time": 87547.351305784,
+ "total_evaluation_time_seconds": "2224.6928466289974"
+}
\ No newline at end of file
diff --git a/saishf__Fimbulvetr-Kuro-Lotus-10.7B/results_2024-07-01T23-24-05.421876.json b/saishf__Fimbulvetr-Kuro-Lotus-10.7B/results_2024-07-01T23-24-05.421876.json
new file mode 100644
index 0000000000000000000000000000000000000000..b99a1af90e30097920f2b03b8592fb5f789cb67c
--- /dev/null
+++ b/saishf__Fimbulvetr-Kuro-Lotus-10.7B/results_2024-07-01T23-24-05.421876.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.676458872734515,
+ "acc_stderr,none": 0.0046687106891924075,
+ "acc_norm,none": 0.86247759410476,
+ "acc_norm_stderr,none": 0.0034369416417827595,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.8510580469694,
+ "eqbench_stderr,none": 2.304299203691162,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=saishf/Fimbulvetr-Kuro-Lotus-10.7B,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "ec1288fd8c06ac408a2a7e503ea62ac300e474e1",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719874027.8025355,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "saishf/Fimbulvetr-Kuro-Lotus-10.7B",
+ "model_name_sanitized": "saishf__Fimbulvetr-Kuro-Lotus-10.7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 85322.658459155,
+ "end_time": 87547.351305784,
+ "total_evaluation_time_seconds": "2224.6928466289974"
+}
\ No newline at end of file
diff --git a/senseable__WestLake-7B-v2/.ipynb_checkpoints/results_2024-06-28T01-32-26.319492-checkpoint.json b/senseable__WestLake-7B-v2/.ipynb_checkpoints/results_2024-06-28T01-32-26.319492-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..b4ce8d7c5906200b86ec9635a8fad9891d294453
--- /dev/null
+++ b/senseable__WestLake-7B-v2/.ipynb_checkpoints/results_2024-06-28T01-32-26.319492-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.7048396733718383,
+ "acc_stderr,none": 0.0045518262729780865,
+ "acc_norm,none": 0.874228241386178,
+ "acc_norm_stderr,none": 0.0033091427273509244,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 77.87295612615341,
+ "eqbench_stderr,none": 1.577200540645531,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=senseable/WestLake-7B-v2,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "41625004c47628837678859753b94c50c82f3bec",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719536781.997623,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "senseable/WestLake-7B-v2",
+ "model_name_sanitized": "senseable__WestLake-7B-v2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 89165.24699567,
+ "end_time": 90736.384333217,
+ "total_evaluation_time_seconds": "1571.1373375470139"
+}
\ No newline at end of file
diff --git a/senseable__WestLake-7B-v2/results_2024-06-28T01-32-26.319492.json b/senseable__WestLake-7B-v2/results_2024-06-28T01-32-26.319492.json
new file mode 100644
index 0000000000000000000000000000000000000000..b4ce8d7c5906200b86ec9635a8fad9891d294453
--- /dev/null
+++ b/senseable__WestLake-7B-v2/results_2024-06-28T01-32-26.319492.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.7048396733718383,
+ "acc_stderr,none": 0.0045518262729780865,
+ "acc_norm,none": 0.874228241386178,
+ "acc_norm_stderr,none": 0.0033091427273509244,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 77.87295612615341,
+ "eqbench_stderr,none": 1.577200540645531,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=senseable/WestLake-7B-v2,trust_remote_code=True",
+ "model_num_parameters": 7241732096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "41625004c47628837678859753b94c50c82f3bec",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719536781.997623,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "senseable/WestLake-7B-v2",
+ "model_name_sanitized": "senseable__WestLake-7B-v2",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 89165.24699567,
+ "end_time": 90736.384333217,
+ "total_evaluation_time_seconds": "1571.1373375470139"
+}
\ No newline at end of file
diff --git a/teknium__Hermes-Trismegistus-Mistral-7B/.ipynb_checkpoints/results_2024-07-02T05-46-44.024042-checkpoint.json b/teknium__Hermes-Trismegistus-Mistral-7B/.ipynb_checkpoints/results_2024-07-02T05-46-44.024042-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..a18b8eef63d0074d44aaa87332fcbd4239b92440
--- /dev/null
+++ b/teknium__Hermes-Trismegistus-Mistral-7B/.ipynb_checkpoints/results_2024-07-02T05-46-44.024042-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.612427803226449,
+ "acc_stderr,none": 0.004862003566798519,
+ "acc_norm,none": 0.7921728739294961,
+ "acc_norm_stderr,none": 0.00404923158643312,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 64.86049373791583,
+ "eqbench_stderr,none": 2.1996109709512552,
+ "percent_parseable,none": 98.83040935672514,
+ "percent_parseable_stderr,none": 0.8245894595446,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=teknium/Hermes-Trismegistus-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "d5757feda9c0c62c7a56fdc2eff7d5041c970a83",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719897658.008983,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "teknium/Hermes-Trismegistus-Mistral-7B",
+ "model_name_sanitized": "teknium__Hermes-Trismegistus-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 108953.100902165,
+ "end_time": 110505.95347585,
+ "total_evaluation_time_seconds": "1552.8525736849988"
+}
\ No newline at end of file
diff --git a/teknium__Hermes-Trismegistus-Mistral-7B/results_2024-07-02T05-46-44.024042.json b/teknium__Hermes-Trismegistus-Mistral-7B/results_2024-07-02T05-46-44.024042.json
new file mode 100644
index 0000000000000000000000000000000000000000..a18b8eef63d0074d44aaa87332fcbd4239b92440
--- /dev/null
+++ b/teknium__Hermes-Trismegistus-Mistral-7B/results_2024-07-02T05-46-44.024042.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.612427803226449,
+ "acc_stderr,none": 0.004862003566798519,
+ "acc_norm,none": 0.7921728739294961,
+ "acc_norm_stderr,none": 0.00404923158643312,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 64.86049373791583,
+ "eqbench_stderr,none": 2.1996109709512552,
+ "percent_parseable,none": 98.83040935672514,
+ "percent_parseable_stderr,none": 0.8245894595446,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=teknium/Hermes-Trismegistus-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "d5757feda9c0c62c7a56fdc2eff7d5041c970a83",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": "cuda:1",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719897658.008983,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<unk>",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "teknium/Hermes-Trismegistus-Mistral-7B",
+ "model_name_sanitized": "teknium__Hermes-Trismegistus-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 108953.100902165,
+ "end_time": 110505.95347585,
+ "total_evaluation_time_seconds": "1552.8525736849988"
+}
\ No newline at end of file
diff --git a/teknium__OpenHermes-2.5-Mistral-7B/.ipynb_checkpoints/results_2024-06-27T23-43-07.467674-checkpoint.json b/teknium__OpenHermes-2.5-Mistral-7B/.ipynb_checkpoints/results_2024-06-27T23-43-07.467674-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..f757b449a33cf1982decded62a48d9b1a230b252
--- /dev/null
+++ b/teknium__OpenHermes-2.5-Mistral-7B/.ipynb_checkpoints/results_2024-06-27T23-43-07.467674-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6302529376618203,
+ "acc_stderr,none": 0.004817495546789561,
+ "acc_norm,none": 0.8167695678151763,
+ "acc_norm_stderr,none": 0.003860646998897285,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.75110483136034,
+ "eqbench_stderr,none": 2.270775919439369,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=teknium/OpenHermes-2.5-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "24c0bea14d53e6f67f1fbe2eca5bfe7cae389b33",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719530289.0024347,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<unk>",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "teknium/OpenHermes-2.5-Mistral-7B",
+ "model_name_sanitized": "teknium__OpenHermes-2.5-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 82672.316515428,
+ "end_time": 84177.532482507,
+ "total_evaluation_time_seconds": "1505.2159670789988"
+}
\ No newline at end of file
diff --git a/teknium__OpenHermes-2.5-Mistral-7B/results_2024-06-27T23-43-07.467674.json b/teknium__OpenHermes-2.5-Mistral-7B/results_2024-06-27T23-43-07.467674.json
new file mode 100644
index 0000000000000000000000000000000000000000..f757b449a33cf1982decded62a48d9b1a230b252
--- /dev/null
+++ b/teknium__OpenHermes-2.5-Mistral-7B/results_2024-06-27T23-43-07.467674.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6302529376618203,
+ "acc_stderr,none": 0.004817495546789561,
+ "acc_norm,none": 0.8167695678151763,
+ "acc_norm_stderr,none": 0.003860646998897285,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 65.75110483136034,
+ "eqbench_stderr,none": 2.270775919439369,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=teknium/OpenHermes-2.5-Mistral-7B,trust_remote_code=True",
+ "model_num_parameters": 7241748480,
+ "model_dtype": "torch.bfloat16",
+ "model_revision": "main",
+ "model_sha": "24c0bea14d53e6f67f1fbe2eca5bfe7cae389b33",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 64
+ ],
+ "device": null,
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719530289.0024347,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 24\nOn-line CPU(s) list: 0-23\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 12\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 384 KiB (12 instances)\nL1i cache: 384 KiB (12 instances)\nL2 cache: 12 MiB (12 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-23\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<unk>",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "<|im_end|>",
+ 32000
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 32000,
+ "max_length": 32768,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "teknium/OpenHermes-2.5-Mistral-7B",
+ "model_name_sanitized": "teknium__OpenHermes-2.5-Mistral-7B",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 82672.316515428,
+ "end_time": 84177.532482507,
+ "total_evaluation_time_seconds": "1505.2159670789988"
+}
\ No newline at end of file
diff --git a/upstage__SOLAR-10.7B-Instruct-v1.0/.ipynb_checkpoints/results_2024-07-01T20-44-42.759467-checkpoint.json b/upstage__SOLAR-10.7B-Instruct-v1.0/.ipynb_checkpoints/results_2024-07-01T20-44-42.759467-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e7bd6d3a600be5c0b76d06d787850e97d077a85
--- /dev/null
+++ b/upstage__SOLAR-10.7B-Instruct-v1.0/.ipynb_checkpoints/results_2024-07-01T20-44-42.759467-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6866162119099781,
+ "acc_stderr,none": 0.004629209184813552,
+ "acc_norm,none": 0.8634734116709819,
+ "acc_norm_stderr,none": 0.003426451744507963,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.65298935843904,
+ "eqbench_stderr,none": 2.3895445747636623,
+ "percent_parseable,none": 98.24561403508773,
+ "percent_parseable_stderr,none": 1.006919374006229,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=upstage/SOLAR-10.7B-Instruct-v1.0,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c08c25ed66414a878fe0401a3596d536c083606c",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719864409.1868505,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "</s>",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "</s>",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "upstage/SOLAR-10.7B-Instruct-v1.0",
+ "model_name_sanitized": "upstage__SOLAR-10.7B-Instruct-v1.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 75704.269434269,
+ "end_time": 77984.688135683,
+ "total_evaluation_time_seconds": "2280.4187014140043"
+}
\ No newline at end of file
diff --git a/upstage__SOLAR-10.7B-Instruct-v1.0/results_2024-07-01T20-44-42.759467.json b/upstage__SOLAR-10.7B-Instruct-v1.0/results_2024-07-01T20-44-42.759467.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e7bd6d3a600be5c0b76d06d787850e97d077a85
--- /dev/null
+++ b/upstage__SOLAR-10.7B-Instruct-v1.0/results_2024-07-01T20-44-42.759467.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6866162119099781,
+ "acc_stderr,none": 0.004629209184813552,
+ "acc_norm,none": 0.8634734116709819,
+ "acc_norm_stderr,none": 0.003426451744507963,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 68.65298935843904,
+ "eqbench_stderr,none": 2.3895445747636623,
+ "percent_parseable,none": 98.24561403508773,
+ "percent_parseable_stderr,none": 1.006919374006229,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=upstage/SOLAR-10.7B-Instruct-v1.0,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "c08c25ed66414a878fe0401a3596d536c083606c",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719864409.1868505,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "</s>",
+ 2
+ ],
+ "tokenizer_eos_token": [
+ "</s>",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "upstage/SOLAR-10.7B-Instruct-v1.0",
+ "model_name_sanitized": "upstage__SOLAR-10.7B-Instruct-v1.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 75704.269434269,
+ "end_time": 77984.688135683,
+ "total_evaluation_time_seconds": "2280.4187014140043"
+}
\ No newline at end of file
diff --git a/upstage__SOLAR-10.7B-v1.0/.ipynb_checkpoints/results_2024-07-01T20-06-05.907692-checkpoint.json b/upstage__SOLAR-10.7B-v1.0/.ipynb_checkpoints/results_2024-07-01T20-06-05.907692-checkpoint.json
new file mode 100644
index 0000000000000000000000000000000000000000..b5ddc03e5218e258ecbda98b23726e9c82dc7ba0
--- /dev/null
+++ b/upstage__SOLAR-10.7B-v1.0/.ipynb_checkpoints/results_2024-07-01T20-06-05.907692-checkpoint.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6394144592710616,
+ "acc_stderr,none": 0.004791890625834175,
+ "acc_norm,none": 0.831009759012149,
+ "acc_norm_stderr,none": 0.0037397742854184536,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 28.659786009514935,
+ "eqbench_stderr,none": 3.1809125071508557,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=upstage/SOLAR-10.7B-v1.0,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "a45090b8e56bdc2b8e32e46b3cd782fc0bea1fa5",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719862032.513694,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "<unk>",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "</s>",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "<s>",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "upstage/SOLAR-10.7B-v1.0",
+ "model_name_sanitized": "upstage__SOLAR-10.7B-v1.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 73327.637521213,
+ "end_time": 75667.837128227,
+ "total_evaluation_time_seconds": "2340.1996070140012"
+}
\ No newline at end of file
diff --git a/upstage__SOLAR-10.7B-v1.0/results_2024-07-01T20-06-05.907692.json b/upstage__SOLAR-10.7B-v1.0/results_2024-07-01T20-06-05.907692.json
new file mode 100644
index 0000000000000000000000000000000000000000..b5ddc03e5218e258ecbda98b23726e9c82dc7ba0
--- /dev/null
+++ b/upstage__SOLAR-10.7B-v1.0/results_2024-07-01T20-06-05.907692.json
@@ -0,0 +1,177 @@
+{
+ "results": {
+ "hellaswag": {
+ "acc,none": 0.6394144592710616,
+ "acc_stderr,none": 0.004791890625834175,
+ "acc_norm,none": 0.831009759012149,
+ "acc_norm_stderr,none": 0.0037397742854184536,
+ "alias": "hellaswag"
+ },
+ "eq_bench": {
+ "eqbench,none": 28.659786009514935,
+ "eqbench_stderr,none": 3.1809125071508557,
+ "percent_parseable,none": 100.0,
+ "percent_parseable_stderr,none": 0.0,
+ "alias": "eq_bench"
+ }
+ },
+ "group_subtasks": {
+ "eq_bench": [],
+ "hellaswag": []
+ },
+ "configs": {
+ "eq_bench": {
+ "task": "eq_bench",
+ "dataset_path": "pbevan11/EQ-Bench",
+ "validation_split": "validation",
+ "doc_to_text": "prompt",
+ "doc_to_target": "reference_answer_fullscale",
+ "process_results": "def calculate_score_fullscale(docs, results):\n reference = eval(docs[\"reference_answer_fullscale\"])\n user = dict(re.findall(r\"(\\w+):\\s+(\\d+)\", results[0]))\n # First check that the emotions specified in the answer match those in the reference\n if len(user.items()) != 4:\n # print('! Error: 4 emotions were not returned')\n # print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n emotions_dict = {}\n for emotion, user_emotion_score in user.items():\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n emotions_dict[emotion] = True\n if len(emotions_dict) != 4:\n print(\"! Error: emotions did not match reference\")\n print(user)\n return {\"eqbench\": 0, \"percent_parseable\": 0}\n\n difference_tally = (\n 0 # Tally of differerence from reference answers for this question\n )\n\n # Iterate over each emotion in the user's answers.\n for emotion, user_emotion_score in user.items():\n # If this emotion is in the reference, calculate the difference between the user's score and the reference score.\n for i in range(1, 5):\n if emotion == reference[f\"emotion{i}\"]:\n d = abs(\n float(user_emotion_score) - float(reference[f\"emotion{i}_score\"])\n )\n # this will be a value between 0 and 10\n if d == 0:\n scaled_difference = 0\n elif d <= 5:\n # S-shaped scaling function\n # https://www.desmos.com/calculator\n # 6.5\\cdot\\ \\frac{1}{\\left(1\\ +\\ e^{\\left(-1.2\\cdot\\left(x-4\\right)\\right)}\\right)}\n scaled_difference = 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))\n\n else:\n scaled_difference = d\n difference_tally += scaled_difference\n\n # Inverting the difference tally so that the closer the answer is to reference, the higher the score.\n # The adjustment constant is chosen such that answering randomly produces a score of zero.\n adjust_const = 0.7477\n final_score = 10 - (difference_tally * adjust_const)\n final_score_percent = final_score * 10\n\n return {\"eqbench\": final_score_percent, \"percent_parseable\": 100}\n",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "eqbench",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "percent_parseable",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "generate_until",
+ "generation_kwargs": {
+ "do_sample": false,
+ "temperature": 0.0,
+ "max_gen_toks": 80,
+ "until": [
+ "\n\n"
+ ]
+ },
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 2.1
+ }
+ },
+ "hellaswag": {
+ "task": "hellaswag",
+ "group": [
+ "multiple_choice"
+ ],
+ "dataset_path": "hellaswag",
+ "training_split": "train",
+ "validation_split": "validation",
+ "process_docs": "def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:\n def _process_doc(doc):\n ctx = doc[\"ctx_a\"] + \" \" + doc[\"ctx_b\"].capitalize()\n out_doc = {\n \"query\": preprocess(doc[\"activity_label\"] + \": \" + ctx),\n \"choices\": [preprocess(ending) for ending in doc[\"endings\"]],\n \"gold\": int(doc[\"label\"]),\n }\n return out_doc\n\n return dataset.map(_process_doc)\n",
+ "doc_to_text": "{{query}}",
+ "doc_to_target": "{{label}}",
+ "doc_to_choice": "choices",
+ "description": "",
+ "target_delimiter": " ",
+ "fewshot_delimiter": "\n\n",
+ "num_fewshot": 0,
+ "metric_list": [
+ {
+ "metric": "acc",
+ "aggregation": "mean",
+ "higher_is_better": true
+ },
+ {
+ "metric": "acc_norm",
+ "aggregation": "mean",
+ "higher_is_better": true
+ }
+ ],
+ "output_type": "multiple_choice",
+ "repeats": 1,
+ "should_decontaminate": false,
+ "metadata": {
+ "version": 1.0
+ }
+ }
+ },
+ "versions": {
+ "eq_bench": 2.1,
+ "hellaswag": 1.0
+ },
+ "n-shot": {
+ "eq_bench": 0,
+ "hellaswag": 0
+ },
+ "higher_is_better": {
+ "eq_bench": {
+ "eqbench": true,
+ "percent_parseable": true
+ },
+ "hellaswag": {
+ "acc": true,
+ "acc_norm": true
+ }
+ },
+ "n-samples": {
+ "hellaswag": {
+ "original": 10042,
+ "effective": 10042
+ },
+ "eq_bench": {
+ "original": 171,
+ "effective": 171
+ }
+ },
+ "config": {
+ "model": "hf",
+ "model_args": "pretrained=upstage/SOLAR-10.7B-v1.0,trust_remote_code=True",
+ "model_num_parameters": 10731524096,
+ "model_dtype": "torch.float16",
+ "model_revision": "main",
+ "model_sha": "a45090b8e56bdc2b8e32e46b3cd782fc0bea1fa5",
+ "batch_size": "auto",
+ "batch_sizes": [
+ 16
+ ],
+ "device": "cuda:0",
+ "use_cache": null,
+ "limit": null,
+ "bootstrap_iters": 100000,
+ "gen_kwargs": null,
+ "random_seed": 0,
+ "numpy_seed": 1234,
+ "torch_seed": 1234,
+ "fewshot_seed": 1234
+ },
+ "git_hash": null,
+ "date": 1719862032.513694,
+ "pretty_env_info": "PyTorch version: 2.3.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 22.04.4 LTS (x86_64)\nGCC version: (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.35\n\nPython version: 3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0] (64-bit runtime)\nPython platform: Linux-6.5.0-1022-gcp-x86_64-with-glibc2.35\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA L4\nGPU 1: NVIDIA L4\n\nNvidia driver version: 555.42.02\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 46 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 48\nOn-line CPU(s) list: 0-47\nVendor ID: GenuineIntel\nModel name: Intel(R) Xeon(R) CPU @ 2.20GHz\nCPU family: 6\nModel: 85\nThread(s) per core: 2\nCore(s) per socket: 24\nSocket(s): 1\nStepping: 7\nBogoMIPS: 4400.47\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat avx512_vnni md_clear arch_capabilities\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 768 KiB (24 instances)\nL1i cache: 768 KiB (24 instances)\nL2 cache: 24 MiB (24 instances)\nL3 cache: 38.5 MiB (1 instance)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-47\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Mitigation; Clear CPU buffers; SMT Host state unknown\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown\nVulnerability Retbleed: Mitigation; Enhanced IBRS\nVulnerability Spec rstack overflow: Not affected\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Enhanced / Automatic IBRS; IBPB conditional; RSB filling; PBRSB-eIBRS SW sequence; BHI Syscall hardening, KVM SW loop\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Mitigation; Clear CPU buffers; SMT Host state unknown\n\nVersions of relevant libraries:\n[pip3] numpy==2.0.0\n[pip3] torch==2.3.1\n[pip3] triton==2.3.1\n[conda] Could not collect",
+ "transformers_version": "4.41.2",
+ "upper_git_hash": null,
+ "tokenizer_pad_token": [
+ "",
+ 0
+ ],
+ "tokenizer_eos_token": [
+ "",
+ 2
+ ],
+ "tokenizer_bos_token": [
+ "",
+ 1
+ ],
+ "eot_token_id": 2,
+ "max_length": 4096,
+ "task_hashes": {},
+ "model_source": "hf",
+ "model_name": "upstage/SOLAR-10.7B-v1.0",
+ "model_name_sanitized": "upstage__SOLAR-10.7B-v1.0",
+ "system_instruction": null,
+ "system_instruction_sha": null,
+ "fewshot_as_multiturn": false,
+ "chat_template": null,
+ "chat_template_sha": null,
+ "start_time": 73327.637521213,
+ "end_time": 75667.837128227,
+ "total_evaluation_time_seconds": "2340.1996070140012"
+}
\ No newline at end of file
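
A minimal sketch follows (it is not part of the diff above): it re-implements the S-shaped difference scaling from the eq_bench process_results string embedded in these files, and reads the headline metrics back out of the committed results JSON. The file path is taken from the diff header; the example difference d = 3.0 is an arbitrary illustration.

# Sketch only: mirrors the scaling inside calculate_score_fullscale and
# reads headline metrics from one of the committed result files.
import json
import math

def scaled_difference(d: float) -> float:
    # Exact matches cost 0, differences up to 5 are squashed by a logistic
    # curve, and larger differences are passed through unchanged.
    if d == 0:
        return 0.0
    if d <= 5:
        return 6.5 * (1 / (1 + math.e ** (-1.2 * (d - 4))))
    return d

print(round(scaled_difference(3.0), 4))  # ~1.5046

# Path taken from the diff header above.
with open("upstage__SOLAR-10.7B-v1.0/results_2024-07-01T20-06-05.907692.json") as f:
    results = json.load(f)["results"]

print(results["hellaswag"]["acc_norm,none"])  # 0.831009759012149
print(results["eq_bench"]["eqbench,none"])    # 28.659786009514935

Note the design choice the logistic squashing encodes: a near-miss of 1 point costs roughly 0.17, while a miss of 5 costs just under 5, which is what lets the adjust_const of 0.7477 calibrate random answering to a score of zero.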