Languages:
Vietnamese
holylovenia committed (verified)
Commit 194eb7d · 1 Parent(s): e245ca4

Upload uit_vicov19qa.py with huggingface_hub

Files changed (1)
  1. uit_vicov19qa.py +167 -0
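The commit message above indicates the file was pushed with huggingface_hub. A minimal sketch of such an upload is shown below; the repo id and authentication details are assumptions, not taken from this page.

from huggingface_hub import HfApi

api = HfApi()  # assumes a token is already configured, e.g. via `huggingface-cli login`
api.upload_file(
    path_or_fileobj="uit_vicov19qa.py",   # local copy of the dataloader script
    path_in_repo="uit_vicov19qa.py",
    repo_id="<namespace>/uit_vicov19qa",  # hypothetical dataset repo id
    repo_type="dataset",
)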
uit_vicov19qa.py ADDED
@@ -0,0 +1,167 @@
+ # coding=utf-8
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{thai-etal-2022-uit,
+     title = "{UIT}-{V}i{C}o{V}19{QA}: A Dataset for {COVID}-19 Community-based Question Answering on {V}ietnamese Language",
+     author = "Thai, Triet and Thao-Ha, Ngan Chu and Vo, Anh and Luu, Son",
+     editor = "Dita, Shirley and Trillanes, Arlene and Lucas, Rochelle Irene",
+     booktitle = "Proceedings of the 36th Pacific Asia Conference on Language, Information and Computation",
+     month = oct,
+     year = "2022",
+     address = "Manila, Philippines",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2022.paclic-1.88",
+     pages = "801--810",
+ }
+ """
+ _DATASETNAME = "uit_vicov19qa"
+ _DESCRIPTION = """\
+ UIT-ViCoV19QA is the first Vietnamese community-based question answering dataset for developing question answering
+ systems for COVID-19. The dataset comprises 4,500 question-answer pairs collected from trusted medical sources,
+ with at least one answer and at most four unique paraphrased answers per question. It contains 1,800 questions
+ with at least two answers and 700 questions with at least three answers, half of which have the maximum of four
+ paraphrased answers.
+ """
+ _HOMEPAGE = "https://github.com/triet2397/UIT-ViCoV19QA"
+ _LANGUAGES = ["vie"]
+ _LICENSE = Licenses.UNKNOWN.value
+ _PAPER_URL = "https://aclanthology.org/2022.paclic-1.88"
+ _LOCAL = False
+ _URLS = {
+     "train": {
+         "1_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/1_ans/UIT-ViCoV19QA_train.csv",
+         "2_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/2_ans/UIT-ViCoV19QA_train.csv",
+         "3_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/3_ans/UIT-ViCoV19QA_train.csv",
+         "4_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/4_ans/UIT-ViCoV19QA_train.csv",
+     },
+     "val": {
+         "1_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/1_ans/UIT-ViCoV19QA_val.csv",
+         "2_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/2_ans/UIT-ViCoV19QA_val.csv",
+         "3_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/3_ans/UIT-ViCoV19QA_val.csv",
+         "4_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/4_ans/UIT-ViCoV19QA_val.csv",
+     },
+     "test": {
+         "1_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/1_ans/UIT-ViCoV19QA_test.csv",
+         "2_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/2_ans/UIT-ViCoV19QA_test.csv",
+         "3_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/3_ans/UIT-ViCoV19QA_test.csv",
+         "4_ans": "https://raw.githubusercontent.com/triet2397/UIT-ViCoV19QA/main/dataset/4_ans/UIT-ViCoV19QA_test.csv",
+     },
+ }
+ _SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class ViHealthQADataset(datasets.GeneratorBasedBuilder):
+     """
+     This is a SeaCrowd dataloader for the uit_vicov19qa dataset. The dataset comprises 4,500 question-answer pairs
+     collected from trusted medical sources, with at least one answer and at most four unique paraphrased answers per question.
+     """
+
+     subsets = ["1_ans", "2_ans", "3_ans", "4_ans"]
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_qa",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_qa",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "seacrowd_qa":
+             features = schemas.qa_features
+         else:
+             raise ValueError(f"No schema matched for {self.config.schema}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         data_dir = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_dir["train"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_dir["val"],
+                     "split": "val",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["test"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath: Dict, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         print(f"Generating examples for split {split}")
+         sample_id = -1
+         for path in filepath.values():
+             raw_examples = pd.read_csv(path, na_filter=False, delimiter="|")
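+             # Assumption, inferred from the column slicing below: the first CSV column is the question id,
+             # the second is the question text, and every remaining column holds one paraphrased answer.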
+             for eid, exam in raw_examples.iterrows():
+                 sample_id += 1
+                 # Use positional indexing explicitly; plain exam[0] relies on a deprecated pandas fallback.
+                 exam_id = exam.iloc[0]
+                 exam_quest = exam.iloc[1]
+                 exam_answers = exam.iloc[2:].values
+                 if self.config.schema == "source":
+                     yield sample_id, {
+                         "id": str(exam_id),
+                         "question": exam_quest,
+                         "answers": exam_answers,
+                     }
+
+                 elif self.config.schema == "seacrowd_qa":
+                     yield sample_id, {
+                         "id": str(sample_id),
+                         "question_id": exam_id,
+                         "document_id": str(sample_id),
+                         "question": exam_quest,
+                         "type": None,
+                         "choices": [],
+                         "context": None,
+                         "answer": exam_answers,
+                         "meta": {},
+                     }
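For context, here is a minimal sketch of how a script-based loader like this is typically consumed with the datasets library. The local script path, the trust_remote_code flag, and an installed seacrowd package are assumptions; the config name is taken from BUILDER_CONFIGS above.

import datasets

# Load the SEACrowd QA schema variant defined by this script (assumes the file is saved
# locally as uit_vicov19qa.py and that the seacrowd package is importable).
dset = datasets.load_dataset(
    "uit_vicov19qa.py",
    name="uit_vicov19qa_seacrowd_qa",
    trust_remote_code=True,  # newer datasets releases require this for script-based loaders
)
print(dset["train"][0]["question"])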