---
dataset_info:
  features:
  - name: id
    dtype: string
  - name: input
    dtype: string
  - name: target
    dtype: string
  splits:
  - name: train
    num_bytes: 13402
    num_examples: 35
  - name: validation
    num_bytes: 291387
    num_examples: 765
  - name: test
    num_bytes: 1414977
    num_examples: 3722
  download_size: 730958
  dataset_size: 1719766
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
---
# ViGEText_17to23 dataset
Evaluating the Symbol Binding Ability of Large Language Models for Multiple-Choice Questions in Vietnamese General Education: https://github.com/uitnlp/ViGEText_17to23
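
The metadata above declares three string columns (`id`, `input`, `target`) and train/validation/test splits. Below is a minimal loading sketch using the `datasets` library; the Hub repository id `uitnlp/ViGEText_17to23` and the single-letter answer format in `target` are assumptions inferred from this card and the paper abstract.

```python
from datasets import load_dataset

# Hub repository id is an assumption inferred from the dataset name;
# adjust it to the actual path if it differs.
ds = load_dataset("uitnlp/ViGEText_17to23")

# Splits declared in the card: train (35), validation (765), test (3722).
print(ds)

example = ds["test"][0]
print(example["id"])      # example identifier
print(example["input"])   # multiple-choice question text, typed in strict LaTeX style
print(example["target"])  # gold answer symbol, expected to be A, B, C, or D (assumption)
```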

```
@inproceedings{10.1145/3628797.3628837,
author = {Nguyen, Duc-Vu and Nguyen, Quoc-Nam},
title = {Evaluating the Symbol Binding Ability of Large Language Models for Multiple-Choice Questions in Vietnamese General Education},
year = {2023},
isbn = {9798400708916},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3628797.3628837},
doi = {10.1145/3628797.3628837},
abstract = {In this paper, we evaluate the ability of large language models (LLMs) to perform multiple choice symbol binding (MCSB) for multiple choice question answering (MCQA) tasks in zero-shot, one-shot, and few-shot settings. We focus on Vietnamese, with fewer challenging MCQA datasets than in English. The two existing datasets, ViMMRC 1.0 and ViMMRC 2.0, focus on literature. Recent research in Vietnamese natural language processing (NLP) has focused on the Vietnamese National High School Graduation Examination (VNHSGE) from 2019 to 2023 to evaluate ChatGPT. However, these studies have mainly focused on how ChatGPT solves the VNHSGE step by step. We aim to create a novel and high-quality dataset by providing structured guidelines for typing LaTeX formulas for mathematics, physics, chemistry, and biology. This dataset can be used to evaluate the MCSB ability of LLMs and smaller language models (LMs) because it is typed in a strict LaTeX style. We determine the most probable character answer (A, B, C, or D) based on context, instead of finding the answer step by step as in previous Vietnamese works. This reduces computational costs and accelerates the evaluation of LLMs. Our evaluation of six well-known LLMs, namely BLOOMZ-7.1B-MT, LLaMA-2-7B, LLaMA-2-70B, GPT-3, GPT-3.5, and GPT-4.0, on the ViMMRC 1.0 and ViMMRC 2.0 benchmarks and our proposed dataset shows promising results on the MCSB ability of LLMs for Vietnamese. The dataset is available for research purposes only.},
booktitle = {Proceedings of the 12th International Symposium on Information and Communication Technology},
pages = {379–386},
numpages = {8},
keywords = {Analysis of Language Models, Multiple Choice Symbol Binding, Multiple Choice Question Answering, Language Modeling},
location = {Ho Chi Minh, Vietnam},
series = {SOICT '23}
}
```