Languages: Vietnamese
holylovenia committed commit b1bafdb (1 parent: ab53323)

Upload medev.py with huggingface_hub

Files changed (1):
  1. medev.py +165 -0
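The commit message below says the file was uploaded with huggingface_hub. For context, a minimal sketch of such an upload (the repo_id shown is an assumption for illustration, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default
api.upload_file(
    path_or_fileobj="medev.py",   # local file to upload
    path_in_repo="medev.py",      # destination path inside the repo
    repo_id="SEACrowd/medev",     # hypothetical repo_id; replace with the actual dataset repo
    repo_type="dataset",
    commit_message="Upload medev.py with huggingface_hub",
)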
medev.py ADDED
@@ -0,0 +1,165 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A high-quality Vietnamese-English parallel dataset constructed specifically for the medical domain, comprising approximately 360K sentence pairs.
+ """
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{medev,
+     title = {{Improving Vietnamese-English Medical Machine Translation}},
+     author = {Nhu Vo and Dat Quoc Nguyen and Dung D. Le and Massimo Piccardi and Wray Buntine},
+     booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING)},
+     year = {2024}
+ }
+ """
+
+ _DATASETNAME = "medev"
+
+ _DESCRIPTION = """\
+ A high-quality Vietnamese-English parallel dataset constructed specifically for the medical domain, comprising approximately 360K sentence pairs.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/nhuvo/MedEV"
+
+ _LANGUAGES = ["vie"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {
+     "train_en": "https://huggingface.co/datasets/nhuvo/MedEV/resolve/main/train.en.txt?download=true",
+     "train_vie": "https://huggingface.co/datasets/nhuvo/MedEV/resolve/main/train.vi.txt?download=true",
+     "val_en": "https://huggingface.co/datasets/nhuvo/MedEV/resolve/main/val.en.new.txt?download=true",
+     "val_vie": "https://huggingface.co/datasets/nhuvo/MedEV/resolve/main/val.vi.new.txt?download=true",
+     "test_en": "https://huggingface.co/datasets/nhuvo/MedEV/resolve/main/test.en.new.txt?download=true",
+     "test_vie": "https://huggingface.co/datasets/nhuvo/MedEV/resolve/main/test.vi.new.txt?download=true",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class MedEVDataset(datasets.GeneratorBasedBuilder):
+     """A high-quality Vietnamese-English parallel dataset constructed specifically for the medical domain, comprising approximately 360K sentence pairs."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_t2t",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd schema",
+             schema="seacrowd_t2t",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "vie_text": datasets.Value("string"),
+                     "eng_text": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         data_dir = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath_en": data_dir["train_en"],
+                     "filepath_vie": data_dir["train_vie"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath_en": data_dir["test_en"],
+                     "filepath_vie": data_dir["test_vie"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath_en": data_dir["val_en"],
+                     "filepath_vie": data_dir["val_vie"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath_en: Path, filepath_vie: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(filepath_en, "r", encoding="utf-8") as f:
+             en_lines = f.readlines()
+         with open(filepath_vie, "r", encoding="utf-8") as f:
+             vie_lines = f.readlines()
+
+         if self.config.schema == "source":
+             for i, (en_line, vie_line) in enumerate(zip(en_lines, vie_lines)):
+                 yield i, {
+                     "id": str(i),
+                     "vie_text": vie_line,
+                     "eng_text": en_line,
+                 }
+
+         elif self.config.schema == "seacrowd_t2t":
+             for i, (en_line, vie_line) in enumerate(zip(en_lines, vie_lines)):
+                 yield i, {
+                     "id": str(i),
+                     "text_1": en_line,
+                     "text_2": vie_line,
+                     "text_1_name": "eng",
+                     "text_2_name": "vie",
+                 }
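For reference, once this script is in place it can be loaded through the standard `datasets` API using the two configs it defines. A minimal sketch, assuming the script sits in the current working directory, that `seacrowd` is installed, and a `datasets` version that still supports loading scripts:

import datasets

# Source schema: parallel pairs exposed as "vie_text"/"eng_text".
ds = datasets.load_dataset("medev.py", name="medev_source", trust_remote_code=True)
print(ds["train"][0])  # {"id": "0", "vie_text": "...", "eng_text": "..."}

# SEACrowd text-to-text schema: the same pairs exposed as "text_1"/"text_2",
# with "text_1_name"/"text_2_name" marking the languages ("eng"/"vie").
ds_t2t = datasets.load_dataset("medev.py", name="medev_seacrowd_t2t", trust_remote_code=True)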