nicholasKluge committed
Commit 575c140
1 parent: 84578e7

Upload results-pt-sft.json with huggingface_hub

Files changed (1)
  1. results-pt-sft.json +1303 -0
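The commit message says the file was uploaded with huggingface_hub. For context, a minimal sketch of that kind of upload; the repository id, token, and local path below are placeholders, not details taken from this commit:

    from huggingface_hub import HfApi

    # Minimal sketch of the upload described in the commit message.
    # repo_id, token, and the local path are placeholders, not values from this commit.
    api = HfApi(token="hf_xxx")  # placeholder token
    api.upload_file(
        path_or_fileobj="results-pt-sft.json",   # local results file produced by the evaluation run
        path_in_repo="results-pt-sft.json",      # destination path inside the repository
        repo_id="user/eval-results",             # placeholder repository id
        commit_message="Upload results-pt-sft.json with huggingface_hub",
    )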
results-pt-sft.json ADDED
@@ -0,0 +1,1303 @@
{
  "results": {
    "assin2_rte": {
      "f1_macro,all": 0.3333333333333333,
      "f1_macro_stderr,all": 0.0031730251394380704,
      "acc,all": 0.5,
      "acc_stderr,all": 0.007138073526203421,
      "alias": "assin2_rte"
    },
    "assin2_sts": {
      "pearson,all": 0.07790738376477561,
      "pearson_stderr,all": 0.014716684813439029,
      "mse,all": 2.066646241830065,
      "mse_stderr,all": "N/A",
      "alias": "assin2_sts"
    },
    "bluex": {
      "acc,all": 0.20584144645340752,
      "acc_stderr,all": 0.008708200533939503,
      "acc,exam_id__USP_2024": 0.24390243902439024,
      "acc_stderr,exam_id__USP_2024": 0.038765983941525854,
      "acc,exam_id__UNICAMP_2018": 0.2222222222222222,
      "acc_stderr,exam_id__UNICAMP_2018": 0.032540841899570724,
      "acc,exam_id__USP_2018": 0.16666666666666666,
      "acc_stderr,exam_id__USP_2018": 0.029320906143906797,
      "acc,exam_id__USP_2022": 0.20408163265306123,
      "acc_stderr,exam_id__USP_2022": 0.03318725036103681,
      "acc,exam_id__USP_2021": 0.1346153846153846,
      "acc_stderr,exam_id__USP_2021": 0.027253785013691273,
      "acc,exam_id__UNICAMP_2021_2": 0.27450980392156865,
      "acc_stderr,exam_id__UNICAMP_2021_2": 0.0360141604917446,
      "acc,exam_id__UNICAMP_2021_1": 0.2826086956521739,
      "acc_stderr,exam_id__UNICAMP_2021_1": 0.038353605844743385,
      "acc,exam_id__USP_2020": 0.08928571428571429,
      "acc_stderr,exam_id__USP_2020": 0.021921367122397676,
      "acc,exam_id__UNICAMP_2022": 0.3076923076923077,
      "acc_stderr,exam_id__UNICAMP_2022": 0.042727899536711536,
      "acc,exam_id__UNICAMP_2023": 0.16279069767441862,
      "acc_stderr,exam_id__UNICAMP_2023": 0.03238294764474062,
      "acc,exam_id__UNICAMP_2024": 0.3111111111111111,
      "acc_stderr,exam_id__UNICAMP_2024": 0.0398333923513593,
      "acc,exam_id__UNICAMP_2020": 0.18181818181818182,
      "acc_stderr,exam_id__UNICAMP_2020": 0.02992924246353086,
      "acc,exam_id__USP_2023": 0.18181818181818182,
      "acc_stderr,exam_id__USP_2023": 0.03345273573325805,
      "acc,exam_id__UNICAMP_2019": 0.2,
      "acc_stderr,exam_id__UNICAMP_2019": 0.03269771596389771,
      "acc,exam_id__USP_2019": 0.175,
      "acc_stderr,exam_id__USP_2019": 0.0348121283420538,
      "alias": "bluex"
    },
    "enem_challenge": {
      "alias": "enem",
      "acc,all": 0.2092372288313506,
      "acc_stderr,all": 0.006229253057208555,
      "acc,exam_id__2016_2": 0.21138211382113822,
      "acc_stderr,exam_id__2016_2": 0.02123691588211098,
      "acc,exam_id__2023": 0.1925925925925926,
      "acc_stderr,exam_id__2023": 0.019583861187616968,
      "acc,exam_id__2013": 0.2037037037037037,
      "acc_stderr,exam_id__2013": 0.02238599079325693,
      "acc,exam_id__2012": 0.12931034482758622,
      "acc_stderr,exam_id__2012": 0.01803175905554286,
      "acc,exam_id__2009": 0.23478260869565218,
      "acc_stderr,exam_id__2009": 0.022814803582640184,
      "acc,exam_id__2022": 0.17293233082706766,
      "acc_stderr,exam_id__2022": 0.01889827424607104,
      "acc,exam_id__2015": 0.31932773109243695,
      "acc_stderr,exam_id__2015": 0.02472561230954832,
      "acc,exam_id__2014": 0.21100917431192662,
      "acc_stderr,exam_id__2014": 0.022513127008089658,
      "acc,exam_id__2016": 0.18181818181818182,
      "acc_stderr,exam_id__2016": 0.020248897347876847,
      "acc,exam_id__2010": 0.23076923076923078,
      "acc_stderr,exam_id__2010": 0.02246971773699712,
      "acc,exam_id__2017": 0.27586206896551724,
      "acc_stderr,exam_id__2017": 0.023994074423977864,
      "acc,exam_id__2011": 0.15384615384615385,
      "acc_stderr,exam_id__2011": 0.019264278502154123
    },
    "faquad_nli": {
      "f1_macro,all": 0.4396551724137931,
      "f1_macro_stderr,all": 0.0035796984729087084,
      "acc,all": 0.7846153846153846,
      "acc_stderr,all": 0.011396120309131327,
      "alias": "faquad_nli"
    },
    "hatebr_offensive": {
      "alias": "hatebr_offensive_binary",
      "f1_macro,all": 0.43054708155379295,
      "f1_macro_stderr,all": 0.009093679844467082,
      "acc,all": 0.4742857142857143,
      "acc_stderr,all": 0.009437507998400261
    },
    "oab_exams": {
      "acc,all": 0.25968109339407747,
      "acc_stderr,all": 0.005403181658894358,
      "acc,exam_id__2017-22": 0.2375,
      "acc_stderr,exam_id__2017-22": 0.027511429390216682,
      "acc,exam_id__2016-20a": 0.2,
      "acc_stderr,exam_id__2016-20a": 0.02584175311098727,
      "acc,exam_id__2011-04": 0.2875,
      "acc_stderr,exam_id__2011-04": 0.02919454405528515,
      "acc,exam_id__2013-12": 0.2875,
      "acc_stderr,exam_id__2013-12": 0.029277381115049662,
      "acc,exam_id__2013-11": 0.325,
      "acc_stderr,exam_id__2013-11": 0.030286419424458838,
      "acc,exam_id__2010-02": 0.32,
      "acc_stderr,exam_id__2010-02": 0.026888774775418785,
      "acc,exam_id__2012-07": 0.2375,
      "acc_stderr,exam_id__2012-07": 0.02737558649609428,
      "acc,exam_id__2016-21": 0.25,
      "acc_stderr,exam_id__2016-21": 0.027994547544285982,
      "acc,exam_id__2015-17": 0.24358974358974358,
      "acc_stderr,exam_id__2015-17": 0.02793267139214751,
      "acc,exam_id__2015-18": 0.2625,
      "acc_stderr,exam_id__2015-18": 0.028396710161944567,
      "acc,exam_id__2014-14": 0.225,
      "acc_stderr,exam_id__2014-14": 0.026939185801353988,
      "acc,exam_id__2015-16": 0.3375,
      "acc_stderr,exam_id__2015-16": 0.030631826713546063,
      "acc,exam_id__2012-09": 0.23376623376623376,
      "acc_stderr,exam_id__2012-09": 0.02787359925121907,
      "acc,exam_id__2011-03": 0.26262626262626265,
      "acc_stderr,exam_id__2011-03": 0.025505720074946718,
      "acc,exam_id__2016-19": 0.2692307692307692,
      "acc_stderr,exam_id__2016-19": 0.028948751914583667,
      "acc,exam_id__2012-06a": 0.2375,
      "acc_stderr,exam_id__2012-06a": 0.027440075549438697,
      "acc,exam_id__2011-05": 0.2625,
      "acc_stderr,exam_id__2011-05": 0.02835789202564455,
      "acc,exam_id__2013-10": 0.2125,
      "acc_stderr,exam_id__2013-10": 0.026367641247603036,
      "acc,exam_id__2017-24": 0.25,
      "acc_stderr,exam_id__2017-24": 0.028053164455460838,
      "acc,exam_id__2016-20": 0.2625,
      "acc_stderr,exam_id__2016-20": 0.0283327789711091,
      "acc,exam_id__2012-08": 0.275,
      "acc_stderr,exam_id__2012-08": 0.02893752928626648,
      "acc,exam_id__2014-13": 0.2,
      "acc_stderr,exam_id__2014-13": 0.025784866156114444,
      "acc,exam_id__2018-25": 0.25,
      "acc_stderr,exam_id__2018-25": 0.027961840366717016,
      "acc,exam_id__2017-23": 0.3,
      "acc_stderr,exam_id__2017-23": 0.02953160157687412,
      "acc,exam_id__2014-15": 0.2564102564102564,
      "acc_stderr,exam_id__2014-15": 0.028456647275964232,
      "acc,exam_id__2012-06": 0.275,
      "acc_stderr,exam_id__2012-06": 0.028790584320040398,
      "acc,exam_id__2010-01": 0.23529411764705882,
      "acc_stderr,exam_id__2010-01": 0.026470002521428834,
      "alias": "oab_exams"
    },
    "portuguese_hate_speech": {
      "alias": "portuguese_hate_speech_binary",
      "f1_macro,all": 0.35895915678524376,
      "f1_macro_stderr,all": 0.011204300451999685,
      "acc,all": 0.381903642773208,
      "acc_stderr,all": 0.011741654959752653
    },
    "tweetsentbr": {
      "f1_macro,all": 0.2114730555936155,
      "f1_macro_stderr,all": 0.0056538657419370805,
      "acc,all": 0.3169154228855721,
      "acc_stderr,all": 0.007343236186351586,
      "alias": "tweetsentbr"
    }
  },
  "configs": {
    "assin2_rte": {
      "task": "assin2_rte",
      "group": ["pt_benchmark", "assin2"],
      "dataset_path": "assin2",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Premissa: {{premise}}\nHipótese: {{hypothesis}}\nPergunta: A hipótese pode ser inferida pela premissa? Sim ou Não?\nResposta:",
      "doc_to_target": "{{['Não', 'Sim'][entailment_judgment]}}",
      "description": "Abaixo estão pares de premissa e hipótese. Para cada par, indique se a hipótese pode ser inferida a partir da premissa, responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [1, 3251, 2, 3252, 3, 4, 5, 6, 3253, 7, 3254, 3255, 3256, 8, 9, 10, 3257, 11, 3258, 12, 13, 14, 15, 3259, 3260, 3261, 3262, 3263, 16, 17, 3264, 18, 3265, 3266, 3267, 19, 20, 3268, 3269, 21, 3270, 3271, 22, 3272, 3273, 23, 3274, 24, 25, 3275],
          "id_column": "sentence_pair_id"
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.1}
    },
    "assin2_sts": {
      "task": "assin2_sts",
      "group": ["pt_benchmark", "assin2"],
      "dataset_path": "assin2",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Frase 1: {{premise}}\nFrase 2: {{hypothesis}}\nPergunta: Quão similares são as duas frases? Dê uma pontuação entre 1,0 a 5,0.\nResposta:",
      "doc_to_target": "<function assin2_float_to_pt_str at 0x14c26cd9a5c0>",
      "description": "Abaixo estão pares de frases que você deve avaliar o grau de similaridade. Dê uma pontuação entre 1,0 e 5,0, sendo 1,0 pouco similar e 5,0 muito similar.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [1, 3251, 2, 3252, 3, 4, 5, 6, 3253, 7, 3254, 3255, 3256, 8, 9, 10, 3257, 11, 3258, 12, 13, 14, 15, 3259, 3260, 3261, 3262, 3263, 16, 17, 3264, 18, 3265, 3266, 3267, 19, 20, 3268, 3269, 21, 3270, 3271, 22, 3272, 3273, 23, 3274, 24, 25, 3275],
          "id_column": "sentence_pair_id"
        }
      },
      "num_fewshot": 10,
      "metric_list": [
        {"metric": "pearson", "aggregation": "pearsonr", "higher_is_better": true},
        {"metric": "mse", "aggregation": "mean_squared_error", "higher_is_better": false}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "number_filter", "type": "float", "range_min": 1.0, "range_max": 5.0, "on_outside_range": "clip", "fallback": 5.0},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.1}
    },
    "bluex": {
      "task": "bluex",
      "group": ["pt_benchmark", "vestibular"],
      "dataset_path": "eduagarcia-temp/BLUEX_without_images",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function enem_doc_to_text at 0x14c26cd99b20>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha de provas de vestibular de universidades brasileiras, selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["USP_2018_3", "UNICAMP_2018_2", "USP_2018_35", "UNICAMP_2018_16", "USP_2018_89"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "normalize_spaces"},
            {"function": "remove_accents"},
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D", "E"],
              "regex_patterns": ["(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", "\\b([ABCDE])\\.", "\\b([ABCDE]) ?[.):-]", "\\b([ABCDE])$", "\\b([ABCDE])\\b"]
            },
            {"function": "take_first"}
          ],
          "group_by": {"column": "exam_id"}
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function enem_doc_to_text at 0x14c26cd99e40>",
      "metadata": {"version": 1.1}
    },
    "enem_challenge": {
      "task": "enem_challenge",
      "task_alias": "enem",
      "group": ["pt_benchmark", "vestibular"],
      "dataset_path": "eduagarcia/enem_challenge",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function enem_doc_to_text at 0x14c26cd9a020>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha do Exame Nacional do Ensino Médio (ENEM), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\", \"D\" ou \"E\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["2022_21", "2022_88", "2022_143"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "normalize_spaces"},
            {"function": "remove_accents"},
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D", "E"],
              "regex_patterns": ["(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCDE])\\b", "\\b([ABCDE])\\.", "\\b([ABCDE]) ?[.):-]", "\\b([ABCDE])$", "\\b([ABCDE])\\b"]
            },
            {"function": "take_first"}
          ],
          "group_by": {"column": "exam_id"}
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function enem_doc_to_text at 0x14c26cd9a200>",
      "metadata": {"version": 1.1}
    },
    "faquad_nli": {
      "task": "faquad_nli",
      "group": ["pt_benchmark"],
      "dataset_path": "ruanchaves/faquad-nli",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Pergunta: {{question}}\nResposta: {{answer}}\nA resposta dada satisfaz à pergunta? Sim ou Não?",
      "doc_to_target": "{{['Não', 'Sim'][label]}}",
      "description": "Abaixo estão pares de pergunta e resposta. Para cada par, você deve julgar se a resposta responde à pergunta de maneira satisfatória e aparenta estar correta. Escreva apenas \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "first_n",
        "sampler_config": {
          "fewshot_indices": [1893, 949, 663, 105, 1169, 2910, 2227, 2813, 974, 558, 1503, 1958, 2918, 601, 1560, 984, 2388, 995, 2233, 1982, 165, 2788, 1312, 2285, 522, 1113, 1670, 323, 236, 1263, 1562, 2519, 1049, 432, 1167, 1394, 2022, 2551, 2194, 2187, 2282, 2816, 108, 301, 1185, 1315, 1420, 2436, 2322, 766]
        }
      },
      "num_fewshot": 15,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.1}
    },
    "hatebr_offensive": {
      "task": "hatebr_offensive",
      "task_alias": "hatebr_offensive_binary",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/portuguese_benchmark",
      "dataset_name": "HateBR_offensive_binary",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto é ofensivo?\nResposta:",
      "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
      "description": "Abaixo contém o texto de comentários de usuários do Instagram em português, sua tarefa é classificar se o texto é ofensivo ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [48, 44, 36, 20, 3511, 88, 3555, 16, 56, 3535, 60, 40, 3527, 4, 76, 3579, 3523, 3551, 68, 3503, 84, 3539, 64, 3599, 80, 3563, 3559, 3543, 3547, 3587, 3595, 3575, 3567, 3591, 24, 96, 92, 3507, 52, 72, 8, 3571, 3515, 3519, 3531, 28, 32, 0, 12, 3583],
          "id_column": "idx"
        }
      },
      "num_fewshot": 25,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    },
    "oab_exams": {
      "task": "oab_exams",
      "group": ["legal_benchmark", "pt_benchmark"],
      "dataset_path": "eduagarcia/oab_exams",
      "test_split": "train",
      "fewshot_split": "train",
      "doc_to_text": "<function doc_to_text at 0x14c26cd9ad40>",
      "doc_to_target": "{{answerKey}}",
      "description": "As perguntas a seguir são questões de múltipla escolha do Exame de Ordem da Ordem dos Advogados do Brasil (OAB), selecione a única alternativa correta e responda apenas com as letras \"A\", \"B\", \"C\" ou \"D\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": ["2010-01_1", "2010-01_11", "2010-01_13", "2010-01_23", "2010-01_26", "2010-01_28", "2010-01_38", "2010-01_48", "2010-01_58", "2010-01_68", "2010-01_76", "2010-01_83", "2010-01_85", "2010-01_91", "2010-01_99"],
          "id_column": "id",
          "exclude_from_task": true
        }
      },
      "num_fewshot": 3,
      "metric_list": [
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "normalize_spaces"},
            {"function": "remove_accents"},
            {
              "function": "find_choices",
              "choices": ["A", "B", "C", "D"],
              "regex_patterns": ["(?:[Ll]etra|[Aa]lternativa|[Rr]esposta|[Rr]esposta [Cc]orreta|[Rr]esposta [Cc]orreta e|[Oo]pcao):? ([ABCD])\\b", "\\b([ABCD])\\.", "\\b([ABCD]) ?[.):-]", "\\b([ABCD])$", "\\b([ABCD])\\b"]
            },
            {"function": "take_first"}
          ],
          "group_by": {"column": "exam_id"}
        }
      ],
      "should_decontaminate": true,
      "doc_to_decontamination_query": "<function doc_to_text at 0x14c26cd9afc0>",
      "metadata": {"version": 1.5}
    },
    "portuguese_hate_speech": {
      "task": "portuguese_hate_speech",
      "task_alias": "portuguese_hate_speech_binary",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/portuguese_benchmark",
      "dataset_name": "Portuguese_Hate_Speech_binary",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O texto contém discurso de ódio?\nResposta:",
      "doc_to_target": "{{'Sim' if label == 1 else 'Não'}}",
      "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o texto contém discurso de ódio ou não. Responda apenas com \"Sim\" ou \"Não\".\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {
        "sampler": "id_sampler",
        "sampler_config": {
          "id_list": [52, 50, 39, 28, 3, 105, 22, 25, 60, 11, 66, 41, 9, 4, 91, 42, 7, 20, 76, 1, 104, 13, 67, 54, 97, 27, 24, 14, 16, 48, 53, 40, 34, 49, 32, 119, 114, 2, 58, 83, 18, 36, 5, 6, 10, 35, 38, 0, 21, 46],
          "id_column": "idx"
        }
      },
      "num_fewshot": 25,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Sim", "Não"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    },
    "tweetsentbr": {
      "task": "tweetsentbr",
      "group": ["pt_benchmark"],
      "dataset_path": "eduagarcia/tweetsentbr_fewshot",
      "test_split": "test",
      "fewshot_split": "train",
      "doc_to_text": "Texto: {{sentence}}\nPergunta: O sentimento do texto é Positivo, Neutro ou Negativo?\nResposta:",
      "doc_to_target": "{{'Positivo' if label == 'Positive' else ('Negativo' if label == 'Negative' else 'Neutro')}}",
      "description": "Abaixo contém o texto de tweets de usuários do Twitter em português, sua tarefa é classificar se o sentimento do texto é Positivo, Neutro ou Negativo. Responda apenas com uma das opções.\n\n",
      "target_delimiter": " ",
      "fewshot_delimiter": "\n\n",
      "fewshot_config": {"sampler": "first_n"},
      "num_fewshot": 25,
      "metric_list": [
        {"metric": "f1_macro", "aggregation": "f1_macro", "higher_is_better": true},
        {"metric": "acc", "aggregation": "acc", "higher_is_better": true}
      ],
      "output_type": "generate_until",
      "generation_kwargs": {"max_gen_toks": 32, "do_sample": false, "temperature": 0.0, "top_k": null, "top_p": null, "until": ["\n\n"]},
      "repeats": 1,
      "filter_list": [
        {
          "name": "all",
          "filter": [
            {"function": "find_similar_label", "labels": ["Positivo", "Neutro", "Negativo"]},
            {"function": "take_first"}
          ]
        }
      ],
      "should_decontaminate": false,
      "metadata": {"version": 1.0}
    }
  },
  "versions": {
    "assin2_rte": 1.1,
    "assin2_sts": 1.1,
    "bluex": 1.1,
    "enem_challenge": 1.1,
    "faquad_nli": 1.1,
    "hatebr_offensive": 1.0,
    "oab_exams": 1.5,
    "portuguese_hate_speech": 1.0,
    "tweetsentbr": 1.0
  },
  "n-shot": {
    "assin2_rte": 15,
    "assin2_sts": 10,
    "bluex": 3,
    "enem_challenge": 3,
    "faquad_nli": 15,
    "hatebr_offensive": 25,
    "oab_exams": 3,
    "portuguese_hate_speech": 25,
    "tweetsentbr": 25
  },
1132
+ "truncated": 0,
1133
+ "non_truncated": 14150,
1134
+ "padded": 0,
1135
+ "non_padded": 14150,
1136
+ "fewshots_truncated": 0,
1137
+ "has_chat_template": true,
1138
+ "chat_type": "user_assistant",
1139
+ "n_gpus": 1,
1140
+ "accelerate_num_process": null,
1141
+ "model_sha": "None",
1142
+ "model_dtype": "torch.bfloat16",
1143
+ "model_memory_footprint": 4889264960,
1144
+ "model_num_parameters": 2444628480,
1145
+ "model_is_loaded_in_4bit": null,
1146
+ "model_is_loaded_in_8bit": null,
1147
+ "model_is_quantized": null,
1148
+ "model_device": "cuda:0",
1149
+ "batch_size": 16,
1150
+ "max_length": 4096,
1151
+ "max_ctx_length": 4064,
1152
+ "max_gen_toks": 32
1153
+ },
1154
+ "task_model_meta": {
1155
+ "assin2_rte": {
1156
+ "sample_size": 2448,
1157
+ "truncated": 0,
1158
+ "non_truncated": 2448,
1159
+ "padded": 0,
1160
+ "non_padded": 2448,
1161
+ "fewshots_truncated": 0,
1162
+ "mean_seq_length": 1061.423202614379,
1163
+ "min_seq_length": 1046,
1164
+ "max_seq_length": 1100,
1165
+ "max_ctx_length": 4064,
1166
+ "max_gen_toks": 32,
1167
+ "mean_original_fewshots_size": 15.0,
1168
+ "mean_effective_fewshot_size": 15.0
1169
+ },
1170
+ "assin2_sts": {
1171
+ "sample_size": 2448,
1172
+ "truncated": 0,
1173
+ "non_truncated": 2448,
1174
+ "padded": 0,
1175
+ "non_padded": 2448,
1176
+ "fewshots_truncated": 0,
1177
+ "mean_seq_length": 747.4232026143791,
1178
+ "min_seq_length": 732,
1179
+ "max_seq_length": 786,
1180
+ "max_ctx_length": 4064,
1181
+ "max_gen_toks": 32,
1182
+ "mean_original_fewshots_size": 10.0,
1183
+ "mean_effective_fewshot_size": 10.0
1184
+ },
1185
+ "bluex": {
1186
+ "sample_size": 719,
1187
+ "truncated": 0,
1188
+ "non_truncated": 719,
1189
+ "padded": 0,
1190
+ "non_padded": 719,
1191
+ "fewshots_truncated": 0,
1192
+ "mean_seq_length": 1198.817802503477,
1193
+ "min_seq_length": 932,
1194
+ "max_seq_length": 1829,
1195
+ "max_ctx_length": 4064,
1196
+ "max_gen_toks": 32,
1197
+ "mean_original_fewshots_size": 3.0,
1198
+ "mean_effective_fewshot_size": 3.0
1199
+ },
1200
+ "enem_challenge": {
1201
+ "sample_size": 1429,
1202
+ "truncated": 0,
1203
+ "non_truncated": 1429,
1204
+ "padded": 0,
1205
+ "non_padded": 1429,
1206
+ "fewshots_truncated": 0,
1207
+ "mean_seq_length": 1035.4177746675998,
1208
+ "min_seq_length": 857,
1209
+ "max_seq_length": 2512,
1210
+ "max_ctx_length": 4064,
1211
+ "max_gen_toks": 32,
1212
+ "mean_original_fewshots_size": 3.0,
1213
+ "mean_effective_fewshot_size": 3.0
1214
+ },
1215
+ "faquad_nli": {
1216
+ "sample_size": 650,
1217
+ "truncated": 0,
1218
+ "non_truncated": 650,
1219
+ "padded": 0,
1220
+ "non_padded": 650,
1221
+ "fewshots_truncated": 0,
1222
+ "mean_seq_length": 1083.1338461538462,
1223
+ "min_seq_length": 1051,
1224
+ "max_seq_length": 1149,
1225
+ "max_ctx_length": 4064,
1226
+ "max_gen_toks": 32,
1227
+ "mean_original_fewshots_size": 15.0,
1228
+ "mean_effective_fewshot_size": 15.0
1229
+ },
1230
+ "hatebr_offensive": {
1231
+ "sample_size": 1400,
1232
+ "truncated": 0,
1233
+ "non_truncated": 1400,
1234
+ "padded": 0,
1235
+ "non_padded": 1400,
1236
+ "fewshots_truncated": 0,
1237
+ "mean_seq_length": 1090.4407142857142,
1238
+ "min_seq_length": 1075,
1239
+ "max_seq_length": 1284,
1240
+ "max_ctx_length": 4064,
1241
+ "max_gen_toks": 32,
1242
+ "mean_original_fewshots_size": 25.0,
1243
+ "mean_effective_fewshot_size": 25.0
1244
+ },
1245
+ "oab_exams": {
1246
+ "sample_size": 2195,
1247
+ "truncated": 0,
1248
+ "non_truncated": 2195,
1249
+ "padded": 0,
1250
+ "non_padded": 2195,
1251
+ "fewshots_truncated": 0,
1252
+ "mean_seq_length": 863.024145785877,
1253
+ "min_seq_length": 690,
1254
+ "max_seq_length": 1139,
1255
+ "max_ctx_length": 4064,
1256
+ "max_gen_toks": 32,
1257
+ "mean_original_fewshots_size": 3.0,
1258
+ "mean_effective_fewshot_size": 3.0
1259
+ },
1260
+ "portuguese_hate_speech": {
1261
+ "sample_size": 851,
1262
+ "truncated": 0,
1263
+ "non_truncated": 851,
1264
+ "padded": 0,
1265
+ "non_padded": 851,
1266
+ "fewshots_truncated": 0,
1267
+ "mean_seq_length": 1442.021151586369,
1268
+ "min_seq_length": 1415,
1269
+ "max_seq_length": 1478,
1270
+ "max_ctx_length": 4064,
1271
+ "max_gen_toks": 32,
1272
+ "mean_original_fewshots_size": 25.0,
1273
+ "mean_effective_fewshot_size": 25.0
1274
+ },
1275
+ "tweetsentbr": {
1276
+ "sample_size": 2010,
1277
+ "truncated": 0,
1278
+ "non_truncated": 2010,
1279
+ "padded": 0,
1280
+ "non_padded": 2010,
1281
+ "fewshots_truncated": 0,
1282
+ "mean_seq_length": 1370.4194029850746,
1283
+ "min_seq_length": 1353,
1284
+ "max_seq_length": 1427,
1285
+ "max_ctx_length": 4064,
1286
+ "max_gen_toks": 32,
1287
+ "mean_original_fewshots_size": 25.0,
1288
+ "mean_effective_fewshot_size": 25.0
1289
+ }
1290
+ },
1291
+ "config": {
1292
+ "model": "huggingface",
1293
+ "model_args": "pretrained=/lustre/mlnvme/data/asen_hpc-mula/checkpoints-llama/slurm_job_17782345/step_42164",
1294
+ "batch_size": "auto",
1295
+ "batch_sizes": [],
1296
+ "device": "cuda:0",
1297
+ "use_cache": null,
1298
+ "limit": null,
1299
+ "bootstrap_iters": 100000,
1300
+ "gen_kwargs": null
1301
+ },
1302
+ "git_hash": null
1303
+ }
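The uploaded file appears to follow the lm-evaluation-harness results layout: per-task scores under "results" (keys such as "acc,all", "f1_macro,all", and "pearson,all"), the task configurations under "configs", and run metadata at the end. A minimal sketch of reading the file and printing the headline metric for each task, assuming it has been downloaded locally (the file name comes from this commit; everything else is generic Python):

    import json

    # Load the uploaded results file (assumed to be in the working directory).
    with open("results-pt-sft.json", encoding="utf-8") as f:
        data = json.load(f)

    # Each task reports its aggregate scores under "<metric>,all"; print the
    # first headline metric found for every task.
    for task, metrics in data["results"].items():
        for key in ("f1_macro,all", "acc,all", "pearson,all"):
            if key in metrics:
                metric_name = key.split(",")[0]
                print(f"{task}: {metric_name} = {metrics[key]:.4f}")
                break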