bootstrapping for stddev: perplexity
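
The line above is the harness noting that the standard error of a metric with no closed-form formula (here, the lambada_openai perplexity) is estimated by bootstrap resampling over `bootstrap_iters` draws (100000 in the `config` block below). A minimal sketch of the idea, assuming perplexity is aggregated as exp(-mean log-likelihood) over documents; this is an illustration, not the harness's own implementation:

```python
# Hypothetical helper: estimate the stderr of perplexity by resampling documents.
import numpy as np

def bootstrap_perplexity_stderr(doc_loglikelihoods, n_resamples=1000, seed=0):
    rng = np.random.default_rng(seed)
    lls = np.asarray(doc_loglikelihoods, dtype=float)
    estimates = np.empty(n_resamples)
    for i in range(n_resamples):
        resample = rng.choice(lls, size=lls.size, replace=True)  # resample docs with replacement
        estimates[i] = np.exp(-resample.mean())                  # perplexity of this resample
    return estimates.std(ddof=1)  # spread of the resampled estimates ~ standard error
```

The `perplexity_stderr,none` value reported for lambada_openai in the JSON below is produced by this kind of resampling.
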
{
  "results": {
    "arc_challenge": {
      "acc,none": 0.1945392491467577,
      "acc_stderr,none": 0.011567709174648728,
      "acc_norm,none": 0.2363481228668942,
      "acc_norm_stderr,none": 0.012414960524301837
    },
    "arc_easy": {
      "acc,none": 0.4364478114478115,
      "acc_stderr,none": 0.010176569980111043,
      "acc_norm,none": 0.39646464646464646,
      "acc_norm_stderr,none": 0.010037412763064529
    },
    "boolq": {
      "acc,none": 0.5691131498470948,
      "acc_stderr,none": 0.008661108320775383
    },
    "hellaswag": {
      "acc,none": 0.28380800637323245,
      "acc_stderr,none": 0.004499233874427503,
      "acc_norm,none": 0.3027285401314479,
      "acc_norm_stderr,none": 0.004584997935360479
    },
    "lambada_openai": {
      "perplexity,none": 37.255307146908386,
      "perplexity_stderr,none": 1.399816741635691,
      "acc,none": 0.3537745002910926,
      "acc_stderr,none": 0.006661428663512768
    },
    "openbookqa": {
      "acc,none": 0.15,
      "acc_stderr,none": 0.015984712135164926,
      "acc_norm,none": 0.268,
      "acc_norm_stderr,none": 0.019827714859587578
    },
    "piqa": {
      "acc,none": 0.6229597388465724,
      "acc_stderr,none": 0.011307569752543899,
      "acc_norm,none": 0.6196953210010882,
      "acc_norm_stderr,none": 0.011326620892570319
    },
    "sciq": {
      "acc,none": 0.754,
      "acc_stderr,none": 0.013626065817750629,
      "acc_norm,none": 0.677,
      "acc_norm_stderr,none": 0.014794927843348628
    },
    "wikitext": {
      "word_perplexity,none": 62.20996521798232,
      "byte_perplexity,none": 1.975726163529806,
      "bits_per_byte,none": 0.9823830026441426
    },
    "winogrande": {
      "acc,none": 0.5130228887134964,
      "acc_stderr,none": 0.01404771839399767
    }
  },
  "configs": {
    "arc_challenge": {
      "task": "arc_challenge",
      "group": [
        "ai2_arc",
        "multiple_choice"
      ],
      "dataset_path": "ai2_arc",
      "dataset_name": "ARC-Challenge",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "Question: {{question}}\nAnswer:",
      "doc_to_target": "{{choices.label.index(answerKey)}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
    },
    "arc_easy": {
      "task": "arc_easy",
      "group": [
        "ai2_arc",
        "multiple_choice"
      ],
      "dataset_path": "ai2_arc",
      "dataset_name": "ARC-Easy",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "Question: {{question}}\nAnswer:",
      "doc_to_target": "{{choices.label.index(answerKey)}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
    },
    "boolq": {
      "task": "boolq",
      "group": [
        "super-glue-lm-eval-v1"
      ],
      "dataset_path": "super_glue",
      "dataset_name": "boolq",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
      "doc_to_target": "label",
      "doc_to_choice": [
        "no",
        "yes"
      ],
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc"
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "passage"
    },
    "hellaswag": {
      "task": "hellaswag",
      "group": [
        "multiple_choice"
      ],
      "dataset_path": "hellaswag",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace('  ', ' ')}}",
      "doc_to_target": "{{label}}",
      "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', '  ', ' ')|list}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false
    },
    "lambada_openai": {
      "task": "lambada_openai",
      "group": [
        "lambada",
        "loglikelihood",
        "perplexity"
      ],
      "dataset_path": "EleutherAI/lambada_openai",
      "dataset_name": "default",
      "test_split": "test",
      "template_aliases": "",
      "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
      "doc_to_target": "{{' '+text.split(' ')[-1]}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "perplexity",
          "aggregation": "perplexity",
          "higher_is_better": false
        },
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "loglikelihood",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{text}}"
    },
    "openbookqa": {
      "task": "openbookqa",
      "group": [
        "multiple_choice"
      ],
      "dataset_path": "openbookqa",
      "dataset_name": "main",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "question_stem",
      "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
      "doc_to_choice": "{{choices.text}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "question_stem"
    },
    "piqa": {
      "task": "piqa",
      "group": [
        "multiple_choice"
      ],
      "dataset_path": "piqa",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "Question: {{goal}}\nAnswer:",
      "doc_to_target": "label",
      "doc_to_choice": "{{[sol1, sol2]}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "goal"
    },
    "sciq": {
      "task": "sciq",
      "group": [
        "multiple_choice"
      ],
      "dataset_path": "sciq",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
      "doc_to_target": 3,
      "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        },
        {
          "metric": "acc_norm",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{support}} {{question}}"
    },
    "wikitext": {
      "task": "wikitext",
      "group": [
        "perplexity",
        "loglikelihood_rolling"
      ],
      "dataset_path": "EleutherAI/wikitext_document_level",
      "dataset_name": "wikitext-2-raw-v1",
      "training_split": "train",
      "validation_split": "validation",
      "test_split": "test",
      "template_aliases": "",
      "doc_to_text": "",
      "doc_to_target": "<function wikitext_detokenizer at 0x7f110a2a0040>",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "word_perplexity"
        },
        {
          "metric": "byte_perplexity"
        },
        {
          "metric": "bits_per_byte"
        }
      ],
      "output_type": "loglikelihood_rolling",
      "repeats": 1,
      "should_decontaminate": true,
      "doc_to_decontamination_query": "{{page}}"
    },
    "winogrande": {
      "task": "winogrande",
      "dataset_path": "winogrande",
      "dataset_name": "winogrande_xl",
      "training_split": "train",
      "validation_split": "validation",
      "doc_to_text": "<function doc_to_text at 0x7f110a272ef0>",
      "doc_to_target": "<function doc_to_target at 0x7f110a273370>",
      "doc_to_choice": "<function doc_to_choice at 0x7f110a2735b0>",
      "description": "",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "num_fewshot": 0,
      "metric_list": [
        {
          "metric": "acc",
          "aggregation": "mean",
          "higher_is_better": true
        }
      ],
      "output_type": "multiple_choice",
      "repeats": 1,
      "should_decontaminate": false
    }
  },
  "versions": {
    "arc_challenge": "Yaml",
    "arc_easy": "Yaml",
    "boolq": "Yaml",
    "hellaswag": "Yaml",
    "lambada_openai": "Yaml",
    "openbookqa": "Yaml",
    "piqa": "Yaml",
    "sciq": "Yaml",
    "wikitext": "Yaml",
    "winogrande": "Yaml"
  },
  "config": {
    "model": "hf",
    "model_args": "pretrained=EleutherAI/pythia-160m",
    "num_fewshot": 0,
    "batch_size": 16,
    "batch_sizes": [],
    "device": "cuda:0",
    "use_cache": null,
    "limit": null,
    "bootstrap_iters": 100000
  },
  "git_hash": "4e44f0a"
}
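
The `config` block records how the run was launched: the `hf` (Hugging Face) model backend with `pretrained=EleutherAI/pythia-160m`, zero-shot, batch size 16 on `cuda:0`, no example limit, and 100000 bootstrap iterations for stderr estimation. A rough sketch of reproducing it through the harness's Python entry point; the `simple_evaluate` signature has changed across lm-evaluation-harness versions, so treat this as an outline rather than a verified invocation for this exact commit (`4e44f0a`):

```python
# Sketch: re-running the same evaluation from Python with lm-evaluation-harness.
# Argument names follow simple_evaluate() in recent harness versions; check your
# installed version, as the interface differs between releases.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=[
        "arc_challenge", "arc_easy", "boolq", "hellaswag", "lambada_openai",
        "openbookqa", "piqa", "sciq", "wikitext", "winogrande",
    ],
    num_fewshot=0,
    batch_size=16,
    device="cuda:0",
)
print(results["results"]["lambada_openai"]["perplexity,none"])
```
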
hf (pretrained=EleutherAI/pythia-160m), limit: None, num_fewshot: 0, batch_size: 16

| Task |Version|Filter| Metric | Value | |Stderr|
|--------------|-------|------|---------------|------:|---|-----:|
|arc_challenge |Yaml |none |acc | 0.1945|± |0.0116|
| | |none |acc_norm | 0.2363|± |0.0124|
|arc_easy |Yaml |none |acc | 0.4364|± |0.0102|
| | |none |acc_norm | 0.3965|± |0.0100|
|boolq |Yaml |none |acc | 0.5691|± |0.0087|
|hellaswag |Yaml |none |acc | 0.2838|± |0.0045|
| | |none |acc_norm | 0.3027|± |0.0046|
|lambada_openai|Yaml |none |perplexity |37.2553|± |1.3998|
| | |none |acc | 0.3538|± |0.0067|
|openbookqa |Yaml |none |acc | 0.1500|± |0.0160|
| | |none |acc_norm | 0.2680|± |0.0198|
|piqa |Yaml |none |acc | 0.6230|± |0.0113|
| | |none |acc_norm | 0.6197|± |0.0113|
|sciq |Yaml |none |acc | 0.7540|± |0.0136|
| | |none |acc_norm | 0.6770|± |0.0148|
|wikitext |Yaml |none |word_perplexity|62.2100| | |
| | |none |byte_perplexity| 1.9757| | |
| | |none |bits_per_byte | 0.9824| | |
|winogrande |Yaml |none |acc | 0.5130|± |0.0140|
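
Two notes on reading the wikitext rows: under the usual definitions (word_perplexity is the exponentiated negative log-likelihood per word, byte_perplexity the same per byte), `bits_per_byte` is simply log2 of `byte_perplexity`, which the reported numbers confirm; and no stderr appears for these rolling-loglikelihood metrics, since only the per-document `perplexity` metric was bootstrapped in this run (the "bootstrapping for stddev: perplexity" line at the top). A quick check of the identity, using the values from the JSON above:

```python
import math

byte_ppl = 1.975726163529806  # byte_perplexity,none from the wikitext results
word_ppl = 62.20996521798232  # word_perplexity,none

print(math.log2(byte_ppl))                      # 0.98238..., i.e. the reported bits_per_byte
print(math.log(word_ppl) / math.log(byte_ppl))  # ~6.07, the implied bytes-per-word ratio
```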