lomahony committed
Commit
4452d9d
1 Parent(s): 7a98bfa

Upload 2 files

dpo-160m-eval-files/dpo-pythia-160m-0shot-shelloutput.txt ADDED
@@ -0,0 +1,430 @@
+ bootstrapping for stddev: perplexity
+ {
+   "results": {
+     "arc_challenge": {
+       "acc,none": 0.19283276450511946,
+       "acc_stderr,none": 0.01152905546566333,
+       "acc_norm,none": 0.24488054607508533,
+       "acc_norm_stderr,none": 0.012566273985131358
+     },
+     "arc_easy": {
+       "acc,none": 0.4671717171717172,
+       "acc_stderr,none": 0.010237645778853858,
+       "acc_norm,none": 0.4132996632996633,
+       "acc_norm_stderr,none": 0.010104361780747516
+     },
+     "boolq": {
+       "acc,none": 0.6149847094801223,
+       "acc_stderr,none": 0.00851066875102728
+     },
+     "hellaswag": {
+       "acc,none": 0.28958374825731925,
+       "acc_stderr,none": 0.004526422125860652,
+       "acc_norm,none": 0.3016331408086039,
+       "acc_norm_stderr,none": 0.004580288728196038
+     },
+     "lambada_openai": {
+       "perplexity,none": 40.49750927655119,
+       "perplexity_stderr,none": 1.9470980651595484,
+       "acc,none": 0.35066951290510384,
+       "acc_stderr,none": 0.006648045374603881
+     },
+     "openbookqa": {
+       "acc,none": 0.172,
+       "acc_stderr,none": 0.01689386887634748,
+       "acc_norm,none": 0.28,
+       "acc_norm_stderr,none": 0.020099950647503237
+     },
+     "piqa": {
+       "acc,none": 0.6332970620239391,
+       "acc_stderr,none": 0.011243625019038255,
+       "acc_norm,none": 0.6262241566920566,
+       "acc_norm_stderr,none": 0.011287972563201014
+     },
+     "sciq": {
+       "acc,none": 0.753,
+       "acc_stderr,none": 0.013644675781314128,
+       "acc_norm,none": 0.67,
+       "acc_norm_stderr,none": 0.014876872027456732
+     },
+     "wikitext": {
+       "word_perplexity,none": 75.24993841350984,
+       "byte_perplexity,none": 2.0386909133746456,
+       "bits_per_byte,none": 1.0276430644458452
+     },
+     "winogrande": {
+       "acc,none": 0.5138121546961326,
+       "acc_stderr,none": 0.014047122916440419
+     }
+   },
+   "configs": {
+     "arc_challenge": {
+       "task": "arc_challenge",
+       "group": [
+         "ai2_arc",
+         "multiple_choice"
+       ],
+       "dataset_path": "ai2_arc",
+       "dataset_name": "ARC-Challenge",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "Question: {{question}}\nAnswer:",
+       "doc_to_target": "{{choices.label.index(answerKey)}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
+     },
+     "arc_easy": {
+       "task": "arc_easy",
+       "group": [
+         "ai2_arc",
+         "multiple_choice"
+       ],
+       "dataset_path": "ai2_arc",
+       "dataset_name": "ARC-Easy",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "Question: {{question}}\nAnswer:",
+       "doc_to_target": "{{choices.label.index(answerKey)}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
+     },
+     "boolq": {
+       "task": "boolq",
+       "group": [
+         "super-glue-lm-eval-v1"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "boolq",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
+       "doc_to_target": "label",
+       "doc_to_choice": [
+         "no",
+         "yes"
+       ],
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc"
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "passage"
+     },
+     "hellaswag": {
+       "task": "hellaswag",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "hellaswag",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace('  ', ' ')}}",
+       "doc_to_target": "{{label}}",
+       "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', '  ', ' ')|list}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false
+     },
+     "lambada_openai": {
+       "task": "lambada_openai",
+       "group": [
+         "lambada",
+         "loglikelihood",
+         "perplexity"
+       ],
+       "dataset_path": "EleutherAI/lambada_openai",
+       "dataset_name": "default",
+       "test_split": "test",
+       "template_aliases": "",
+       "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+       "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "perplexity",
+           "aggregation": "perplexity",
+           "higher_is_better": false
+         },
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "loglikelihood",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "{{text}}"
+     },
+     "openbookqa": {
+       "task": "openbookqa",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "openbookqa",
+       "dataset_name": "main",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "question_stem",
+       "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "question_stem"
+     },
+     "piqa": {
+       "task": "piqa",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "piqa",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "Question: {{goal}}\nAnswer:",
+       "doc_to_target": "label",
+       "doc_to_choice": "{{[sol1, sol2]}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "goal"
+     },
+     "sciq": {
+       "task": "sciq",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "sciq",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
+       "doc_to_target": 3,
+       "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "{{support}} {{question}}"
+     },
+     "wikitext": {
+       "task": "wikitext",
+       "group": [
+         "perplexity",
+         "loglikelihood_rolling"
+       ],
+       "dataset_path": "EleutherAI/wikitext_document_level",
+       "dataset_name": "wikitext-2-raw-v1",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "template_aliases": "",
+       "doc_to_text": "",
+       "doc_to_target": "<function wikitext_detokenizer at 0x7fd1a1c44040>",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "word_perplexity"
+         },
+         {
+           "metric": "byte_perplexity"
+         },
+         {
+           "metric": "bits_per_byte"
+         }
+       ],
+       "output_type": "loglikelihood_rolling",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "{{page}}"
+     },
+     "winogrande": {
+       "task": "winogrande",
+       "dataset_path": "winogrande",
+       "dataset_name": "winogrande_xl",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "<function doc_to_text at 0x7fd1a1c16ef0>",
+       "doc_to_target": "<function doc_to_target at 0x7fd1a1c17370>",
+       "doc_to_choice": "<function doc_to_choice at 0x7fd1a1c175b0>",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 0,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false
+     }
+   },
+   "versions": {
+     "arc_challenge": "Yaml",
+     "arc_easy": "Yaml",
+     "boolq": "Yaml",
+     "hellaswag": "Yaml",
+     "lambada_openai": "Yaml",
+     "openbookqa": "Yaml",
+     "piqa": "Yaml",
+     "sciq": "Yaml",
+     "wikitext": "Yaml",
+     "winogrande": "Yaml"
+   },
+   "config": {
+     "model": "hf",
+     "model_args": "pretrained=lomahony/eleuther-pythia160m-hh-dpo",
+     "num_fewshot": 0,
+     "batch_size": 16,
+     "batch_sizes": [],
+     "device": "cuda:0",
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000
+   },
+   "git_hash": "4e44f0a"
+ }
+ hf (pretrained=lomahony/eleuther-pythia160m-hh-dpo), limit: None, num_fewshot: 0, batch_size: 16
+ | Task |Version|Filter| Metric | Value | |Stderr|
+ |--------------|-------|------|---------------|------:|---|-----:|
+ |arc_challenge |Yaml |none |acc | 0.1928|± |0.0115|
+ | | |none |acc_norm | 0.2449|± |0.0126|
+ |arc_easy |Yaml |none |acc | 0.4672|± |0.0102|
+ | | |none |acc_norm | 0.4133|± |0.0101|
+ |boolq |Yaml |none |acc | 0.6150|± |0.0085|
+ |hellaswag |Yaml |none |acc | 0.2896|± |0.0045|
+ | | |none |acc_norm | 0.3016|± |0.0046|
+ |lambada_openai|Yaml |none |perplexity |40.4975|± |1.9471|
+ | | |none |acc | 0.3507|± |0.0066|
+ |openbookqa |Yaml |none |acc | 0.1720|± |0.0169|
+ | | |none |acc_norm | 0.2800|± |0.0201|
+ |piqa |Yaml |none |acc | 0.6333|± |0.0112|
+ | | |none |acc_norm | 0.6262|± |0.0113|
+ |sciq |Yaml |none |acc | 0.7530|± |0.0136|
+ | | |none |acc_norm | 0.6700|± |0.0149|
+ |wikitext |Yaml |none |word_perplexity|75.2499| | |
+ | | |none |byte_perplexity| 2.0387| | |
+ | | |none |bits_per_byte | 1.0276| | |
+ |winogrande |Yaml |none |acc | 0.5138|± |0.0140|
+
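The "config" block in this file records how the 0-shot run was launched. As a minimal command sketch, assuming the EleutherAI lm-evaluation-harness CLI at revision 4e44f0a (the entry point and flag spellings can differ across harness revisions, and the task list below is inferred from the results rather than taken from the log):

lm_eval --model hf \
  --model_args pretrained=lomahony/eleuther-pythia160m-hh-dpo \
  --tasks arc_challenge,arc_easy,boolq,hellaswag,lambada_openai,openbookqa,piqa,sciq,wikitext,winogrande \
  --num_fewshot 0 --batch_size 16 --device cuda:0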
dpo-160m-eval-files/dpo-pythia-160m-5shot-shelloutput.txt ADDED
@@ -0,0 +1,430 @@
+ bootstrapping for stddev: perplexity
+ {
+   "results": {
+     "arc_challenge": {
+       "acc,none": 0.20563139931740615,
+       "acc_stderr,none": 0.011810745260742547,
+       "acc_norm,none": 0.24744027303754265,
+       "acc_norm_stderr,none": 0.01261035266329267
+     },
+     "arc_easy": {
+       "acc,none": 0.46380471380471383,
+       "acc_stderr,none": 0.01023286555034674,
+       "acc_norm,none": 0.4385521885521885,
+       "acc_norm_stderr,none": 0.010182010275471116
+     },
+     "boolq": {
+       "acc,none": 0.6055045871559633,
+       "acc_stderr,none": 0.008548152025770934
+     },
+     "hellaswag": {
+       "acc,none": 0.2885879306910974,
+       "acc_stderr,none": 0.004521798577922137,
+       "acc_norm,none": 0.3088030272854013,
+       "acc_norm_stderr,none": 0.004610554974411229
+     },
+     "lambada_openai": {
+       "perplexity,none": 68.78788187981594,
+       "perplexity_stderr,none": 3.3418985414978897,
+       "acc,none": 0.2815835435668543,
+       "acc_stderr,none": 0.006266194106395877
+     },
+     "openbookqa": {
+       "acc,none": 0.158,
+       "acc_stderr,none": 0.01632804980457984,
+       "acc_norm,none": 0.254,
+       "acc_norm_stderr,none": 0.019486596801643368
+     },
+     "piqa": {
+       "acc,none": 0.6284004352557128,
+       "acc_stderr,none": 0.011274603006724757,
+       "acc_norm,none": 0.6332970620239391,
+       "acc_norm_stderr,none": 0.01124362501903826
+     },
+     "sciq": {
+       "acc,none": 0.76,
+       "acc_stderr,none": 0.013512312258920836,
+       "acc_norm,none": 0.737,
+       "acc_norm_stderr,none": 0.013929286594259724
+     },
+     "wikitext": {
+       "word_perplexity,none": 75.24993841350984,
+       "byte_perplexity,none": 2.0386909133746456,
+       "bits_per_byte,none": 1.0276430644458452
+     },
+     "winogrande": {
+       "acc,none": 0.5138121546961326,
+       "acc_stderr,none": 0.014047122916440419
+     }
+   },
+   "configs": {
+     "arc_challenge": {
+       "task": "arc_challenge",
+       "group": [
+         "ai2_arc",
+         "multiple_choice"
+       ],
+       "dataset_path": "ai2_arc",
+       "dataset_name": "ARC-Challenge",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "Question: {{question}}\nAnswer:",
+       "doc_to_target": "{{choices.label.index(answerKey)}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
+     },
+     "arc_easy": {
+       "task": "arc_easy",
+       "group": [
+         "ai2_arc",
+         "multiple_choice"
+       ],
+       "dataset_path": "ai2_arc",
+       "dataset_name": "ARC-Easy",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "Question: {{question}}\nAnswer:",
+       "doc_to_target": "{{choices.label.index(answerKey)}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "Question: {{question}}\nAnswer:"
+     },
+     "boolq": {
+       "task": "boolq",
+       "group": [
+         "super-glue-lm-eval-v1"
+       ],
+       "dataset_path": "super_glue",
+       "dataset_name": "boolq",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "{{passage}}\nQuestion: {{question}}?\nAnswer:",
+       "doc_to_target": "label",
+       "doc_to_choice": [
+         "no",
+         "yes"
+       ],
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc"
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "passage"
+     },
+     "hellaswag": {
+       "task": "hellaswag",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "hellaswag",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "{% set text = activity_label ~ ': ' ~ ctx_a ~ ' ' ~ ctx_b.capitalize() %}{{text|trim|replace(' [title]', '. ')|regex_replace('\\[.*?\\]', '')|replace('  ', ' ')}}",
+       "doc_to_target": "{{label}}",
+       "doc_to_choice": "{{endings|map('trim')|map('replace', ' [title]', '. ')|map('regex_replace', '\\[.*?\\]', '')|map('replace', '  ', ' ')|list}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false
+     },
+     "lambada_openai": {
+       "task": "lambada_openai",
+       "group": [
+         "lambada",
+         "loglikelihood",
+         "perplexity"
+       ],
+       "dataset_path": "EleutherAI/lambada_openai",
+       "dataset_name": "default",
+       "test_split": "test",
+       "template_aliases": "",
+       "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
+       "doc_to_target": "{{' '+text.split(' ')[-1]}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "perplexity",
+           "aggregation": "perplexity",
+           "higher_is_better": false
+         },
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "loglikelihood",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "{{text}}"
+     },
+     "openbookqa": {
+       "task": "openbookqa",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "openbookqa",
+       "dataset_name": "main",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "question_stem",
+       "doc_to_target": "{{choices.label.index(answerKey.lstrip())}}",
+       "doc_to_choice": "{{choices.text}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "question_stem"
+     },
+     "piqa": {
+       "task": "piqa",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "piqa",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "Question: {{goal}}\nAnswer:",
+       "doc_to_target": "label",
+       "doc_to_choice": "{{[sol1, sol2]}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "goal"
+     },
+     "sciq": {
+       "task": "sciq",
+       "group": [
+         "multiple_choice"
+       ],
+       "dataset_path": "sciq",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "doc_to_text": "{{support.lstrip()}}\nQuestion: {{question}}\nAnswer:",
+       "doc_to_target": 3,
+       "doc_to_choice": "{{[distractor1, distractor2, distractor3, correct_answer]}}",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         },
+         {
+           "metric": "acc_norm",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "{{support}} {{question}}"
+     },
+     "wikitext": {
+       "task": "wikitext",
+       "group": [
+         "perplexity",
+         "loglikelihood_rolling"
+       ],
+       "dataset_path": "EleutherAI/wikitext_document_level",
+       "dataset_name": "wikitext-2-raw-v1",
+       "training_split": "train",
+       "validation_split": "validation",
+       "test_split": "test",
+       "template_aliases": "",
+       "doc_to_text": "",
+       "doc_to_target": "<function wikitext_detokenizer at 0x7ff6ce1e4040>",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "word_perplexity"
+         },
+         {
+           "metric": "byte_perplexity"
+         },
+         {
+           "metric": "bits_per_byte"
+         }
+       ],
+       "output_type": "loglikelihood_rolling",
+       "repeats": 1,
+       "should_decontaminate": true,
+       "doc_to_decontamination_query": "{{page}}"
+     },
+     "winogrande": {
+       "task": "winogrande",
+       "dataset_path": "winogrande",
+       "dataset_name": "winogrande_xl",
+       "training_split": "train",
+       "validation_split": "validation",
+       "doc_to_text": "<function doc_to_text at 0x7ff6ce1b6ef0>",
+       "doc_to_target": "<function doc_to_target at 0x7ff6ce1b7370>",
+       "doc_to_choice": "<function doc_to_choice at 0x7ff6ce1b75b0>",
+       "description": "",
+       "target_delimiter": " ",
+       "fewshot_delimiter": "\n\n",
+       "num_fewshot": 5,
+       "metric_list": [
+         {
+           "metric": "acc",
+           "aggregation": "mean",
+           "higher_is_better": true
+         }
+       ],
+       "output_type": "multiple_choice",
+       "repeats": 1,
+       "should_decontaminate": false
+     }
+   },
+   "versions": {
+     "arc_challenge": "Yaml",
+     "arc_easy": "Yaml",
+     "boolq": "Yaml",
+     "hellaswag": "Yaml",
+     "lambada_openai": "Yaml",
+     "openbookqa": "Yaml",
+     "piqa": "Yaml",
+     "sciq": "Yaml",
+     "wikitext": "Yaml",
+     "winogrande": "Yaml"
+   },
+   "config": {
+     "model": "hf",
+     "model_args": "pretrained=lomahony/eleuther-pythia160m-hh-dpo",
+     "num_fewshot": 5,
+     "batch_size": 16,
+     "batch_sizes": [],
+     "device": "cuda:0",
+     "use_cache": null,
+     "limit": null,
+     "bootstrap_iters": 100000
+   },
+   "git_hash": "4e44f0a"
+ }
+ hf (pretrained=lomahony/eleuther-pythia160m-hh-dpo), limit: None, num_fewshot: 5, batch_size: 16
+ | Task |Version|Filter| Metric | Value | |Stderr|
+ |--------------|-------|------|---------------|------:|---|-----:|
+ |arc_challenge |Yaml |none |acc | 0.2056|± |0.0118|
+ | | |none |acc_norm | 0.2474|± |0.0126|
+ |arc_easy |Yaml |none |acc | 0.4638|± |0.0102|
+ | | |none |acc_norm | 0.4386|± |0.0102|
+ |boolq |Yaml |none |acc | 0.6055|± |0.0085|
+ |hellaswag |Yaml |none |acc | 0.2886|± |0.0045|
+ | | |none |acc_norm | 0.3088|± |0.0046|
+ |lambada_openai|Yaml |none |perplexity |68.7879|± |3.3419|
+ | | |none |acc | 0.2816|± |0.0063|
+ |openbookqa |Yaml |none |acc | 0.1580|± |0.0163|
+ | | |none |acc_norm | 0.2540|± |0.0195|
+ |piqa |Yaml |none |acc | 0.6284|± |0.0113|
+ | | |none |acc_norm | 0.6333|± |0.0112|
+ |sciq |Yaml |none |acc | 0.7600|± |0.0135|
+ | | |none |acc_norm | 0.7370|± |0.0139|
+ |wikitext |Yaml |none |word_perplexity|75.2499| | |
+ | | |none |byte_perplexity| 2.0387| | |
+ | | |none |bits_per_byte | 1.0276| | |
+ |winogrande |Yaml |none |acc | 0.5138|± |0.0140|
+
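The 5-shot file differs from the 0-shot run only in the few-shot count. Under the same assumptions as the sketch after the first file (task list inferred from the results; flags per the lm-evaluation-harness CLI), the corresponding invocation would be:

lm_eval --model hf \
  --model_args pretrained=lomahony/eleuther-pythia160m-hh-dpo \
  --tasks arc_challenge,arc_easy,boolq,hellaswag,lambada_openai,openbookqa,piqa,sciq,wikitext,winogrande \
  --num_fewshot 5 --batch_size 16 --device cuda:0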