Datasets:
OALL
/

Modalities:
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
File size: 10,855 Bytes
f01d54e
 
400c508
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2b1b66c
 
 
 
 
 
 
400c508
4f6ef6e
f01d54e
 
 
 
 
 
 
 
 
 
 
 
 
 
f62d446
 
 
 
 
 
 
f01d54e
4f6ef6e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9b79334
 
 
 
 
 
 
4f6ef6e
46f2fe2
 
 
 
 
 
 
 
 
 
 
cb956eb
 
 
 
 
 
 
46f2fe2
463645b
 
 
 
 
 
 
 
 
 
 
a23266d
 
 
 
 
 
 
463645b
b84c30b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d13a746
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a69bf1c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
961f38b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35dc6a9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e802803
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f01d54e
400c508
 
2b1b66c
 
 
 
f01d54e
 
f62d446
 
 
 
4f6ef6e
 
9b79334
 
 
 
46f2fe2
 
cb956eb
 
 
 
463645b
 
a23266d
 
 
 
b84c30b
 
 
 
d13a746
 
 
 
a69bf1c
 
 
 
961f38b
 
 
 
35dc6a9
 
 
 
e802803
 
 
 
f01d54e
164a57d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
---
dataset_info:
- config_name: mcq_exams_test_ar
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: sol4
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: test
    num_bytes: 152003
    num_examples: 557
  - name: validation
    num_bytes: 1135
    num_examples: 5
  download_size: 92764
  dataset_size: 153138
- config_name: meta_ar_dialects
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: sol4
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: test
    num_bytes: 5612859
    num_examples: 5395
  - name: validation
    num_bytes: 4919
    num_examples: 5
  download_size: 2174106
  dataset_size: 5617778
- config_name: meta_ar_msa
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: sol4
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: test
    num_bytes: 948833
    num_examples: 895
  - name: validation
    num_bytes: 5413
    num_examples: 5
  download_size: 380941
  dataset_size: 954246
- config_name: multiple_choice_copa_translated_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: test
    num_bytes: 11904
    num_examples: 84
  - name: validation
    num_bytes: 848
    num_examples: 5
  download_size: 13056
  dataset_size: 12752
- config_name: multiple_choice_facts_truefalse_balanced_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: test
    num_bytes: 121221
    num_examples: 75
  - name: validation
    num_bytes: 7919
    num_examples: 5
  download_size: 79171
  dataset_size: 129140
- config_name: multiple_choice_grounded_statement_soqal_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: sol4
    dtype: string
  - name: sol5
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 161956
    num_examples: 155
  download_size: 59090
  dataset_size: 161956
- config_name: multiple_choice_grounded_statement_xglue_mlqa_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: sol4
    dtype: string
  - name: sol5
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 146071
    num_examples: 155
  download_size: 77150
  dataset_size: 146071
- config_name: multiple_choice_openbookqa_translated_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: sol4
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 71543
    num_examples: 336
  download_size: 44973
  dataset_size: 71543
- config_name: multiple_choice_rating_sentiment_no_neutral_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 1408389
    num_examples: 8000
  download_size: 481296
  dataset_size: 1408389
- config_name: multiple_choice_rating_sentiment_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 1219534
    num_examples: 6000
  download_size: 375276
  dataset_size: 1219534
- config_name: multiple_choice_sentiment_task
  features:
  - name: query
    dtype: string
  - name: sol1
    dtype: string
  - name: sol2
    dtype: string
  - name: sol3
    dtype: string
  - name: label
    dtype: string
  splits:
  - name: train
    num_bytes: 457756
    num_examples: 1725
  download_size: 185976
  dataset_size: 457756
configs:
- config_name: mcq_exams_test_ar
  data_files:
  - split: test
    path: mcq_exams_test_ar/test-*
  - split: validation
    path: mcq_exams_test_ar/validation-*
- config_name: meta_ar_dialects
  data_files:
  - split: test
    path: meta_ar_dialects/test-*
  - split: validation
    path: meta_ar_dialects/validation-*
- config_name: meta_ar_msa
  data_files:
  - split: test
    path: meta_ar_msa/test-*
  - split: validation
    path: meta_ar_msa/validation-*
- config_name: multiple_choice_copa_translated_task
  data_files:
  - split: test
    path: multiple_choice_copa_translated_task/test-*
  - split: validation
    path: multiple_choice_copa_translated_task/validation-*
- config_name: multiple_choice_facts_truefalse_balanced_task
  data_files:
  - split: test
    path: multiple_choice_facts_truefalse_balanced_task/test-*
  - split: validation
    path: multiple_choice_facts_truefalse_balanced_task/validation-*
- config_name: multiple_choice_grounded_statement_soqal_task
  data_files:
  - split: train
    path: multiple_choice_grounded_statement_soqal_task/train-*
- config_name: multiple_choice_grounded_statement_xglue_mlqa_task
  data_files:
  - split: train
    path: multiple_choice_grounded_statement_xglue_mlqa_task/train-*
- config_name: multiple_choice_openbookqa_translated_task
  data_files:
  - split: train
    path: multiple_choice_openbookqa_translated_task/train-*
- config_name: multiple_choice_rating_sentiment_no_neutral_task
  data_files:
  - split: train
    path: multiple_choice_rating_sentiment_no_neutral_task/train-*
- config_name: multiple_choice_rating_sentiment_task
  data_files:
  - split: train
    path: multiple_choice_rating_sentiment_task/train-*
- config_name: multiple_choice_sentiment_task
  data_files:
  - split: train
    path: multiple_choice_sentiment_task/train-*
---
# AlGhafa Arabic LLM Benchmark

### New fix: Normalized whitespace characters and ensured consistency across all datasets for improved data quality and compatibility.

Multiple-choice evaluation benchmark for zero- and few-shot evaluation of Arabic LLMs. We adapt the following tasks:

- Belebele Ar MSA [Bandarkar et al. (2023)](https://arxiv.org/abs/2308.16884): 900 entries
- Belebele Ar Dialects [Bandarkar et al. (2023)](https://arxiv.org/abs/2308.16884): 5400 entries
- COPA Ar: 89 entries machine-translated from English [COPA](https://people.ict.usc.edu/~gordon/copa.html) and verified by native Arabic speakers.
- Facts balanced (based on AraFacts) [Sheikh Ali et al. (2021)](https://aclanthology.org/2021.wanlp-1.26): 80 entries (after balancing dataset), consisting of a short article and a corresponding claim, to be deemed true or false
- MCQ Exams Ar [Hardalov et al. (2020)](https://aclanthology.org/2020.emnlp-main.438): 2248 entries
- OpenbookQA Ar: 336 entries. Machine-translated from English [OpenbookQA](https://api.semanticscholar.org/CorpusID:52183757) and verified by native Arabic speakers.
- Rating sentiment (HARD-Arabic-Dataset) [Elnagar et al. (2018)](https://link.springer.com/chapter/10.1007/978-3-319-67056-0_3): determine the sentiment
  of reviews, with 3 possible categories (positive, neutral, negative) derived from the review score (1-5) as follows: 1-2 negative, 3 neutral, 4-5 positive; 6000 entries (2000 for each of the three classes)
- Rating sentiment no neutral (HARD-Arabic-Dataset) [Elnagar et al., 2018](https://link.springer.com/chapter/10.1007/978-3-319-67056-0_3): 8000 entries in which we remove the neutral class by extending the negative class (corresponding to scores 1-3); 8000 entries (4000 for each class)
- Sentiment [Abu Farha et al., 2021](https://aclanthology.org/2021.wanlp-1.36): 1725 entries based on Twitter posts, that can be classified as positive, negative, or neutral
- SOQAL [Mozannar et al., 2019](https://aclanthology.org/W19-4612): grounded statement task to assess in-context reading comprehension, consisting of a context and a related question; consists of 155 entries with one original correct answer, transformed to multiple choice task by adding four possible
  human-curated incorrect choices per sample
- XGLUE (based on XGLUE-MLQA) [Liang et al., 2020](https://arxiv.org/abs/2004.01401); [Lewis et al., 2019](https://arxiv.org/abs/1910.07475): consists of
  155 entries transformed to a multiple choice task by adding four human-curated incorrect choices per sample


## Citing the AlGhafa benchmark:

```bibtex
@inproceedings{almazrouei-etal-2023-alghafa,
    title = "{A}l{G}hafa Evaluation Benchmark for {A}rabic Language Models",
    author = "Almazrouei, Ebtesam  and
      Cojocaru, Ruxandra  and
      Baldo, Michele  and
      Malartic, Quentin  and
      Alobeidli, Hamza  and
      Mazzotta, Daniele  and
      Penedo, Guilherme  and
      Campesan, Giulia  and
      Farooq, Mugariya  and
      Alhammadi, Maitha  and
      Launay, Julien  and
      Noune, Badreddine",
    editor = "Sawaf, Hassan  and
      El-Beltagy, Samhaa  and
      Zaghouani, Wajdi  and
      Magdy, Walid  and
      Abdelali, Ahmed  and
      Tomeh, Nadi  and
      Abu Farha, Ibrahim  and
      Habash, Nizar  and
      Khalifa, Salam  and
      Keleg, Amr  and
      Haddad, Hatem  and
      Zitouni, Imed  and
      Mrini, Khalil  and
      Almatham, Rawan",
    booktitle = "Proceedings of ArabicNLP 2023",
    month = dec,
    year = "2023",
    address = "Singapore (Hybrid)",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.arabicnlp-1.21",
    doi = "10.18653/v1/2023.arabicnlp-1.21",
    pages = "244--275",
    abstract = "Recent advances in the space of Arabic large language models have opened up a wealth of potential practical applications. From optimal training strategies, large scale data acquisition and continuously increasing NLP resources, the Arabic LLM landscape has improved in a very short span of time, despite being plagued by training data scarcity and limited evaluation resources compared to English. In line with contributing towards this ever-growing field, we introduce AlGhafa, a new multiple-choice evaluation benchmark for Arabic LLMs. For showcasing purposes, we train a new suite of models, including a 14 billion parameter model, the largest monolingual Arabic decoder-only model to date. We use a collection of publicly available datasets, as well as a newly introduced HandMade dataset consisting of 8 billion tokens. Finally, we explore the quantitative and qualitative toxicity of several Arabic models, comparing our models to existing public Arabic LLMs.",
}
```