holylovenia committed
Commit 7deeecb · 1 Parent(s): 445be15

Upload minangnlp_mt.py with huggingface_hub

Files changed (1)
  1. minangnlp_mt.py +159 -0
minangnlp_mt.py ADDED
@@ -0,0 +1,159 @@
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{koto-koto-2020-towards,
+     title = "Towards Computational Linguistics in {M}inangkabau Language: Studies on Sentiment Analysis and Machine Translation",
+     author = "Koto, Fajri and
+       Koto, Ikhwan",
+     booktitle = "Proceedings of the 34th Pacific Asia Conference on Language, Information and Computation",
+     month = oct,
+     year = "2020",
+     address = "Hanoi, Vietnam",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2020.paclic-1.17",
+     pages = "138--148",
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["min", "ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
+ _DATASETNAME = "minangnlp_mt"
+ _DESCRIPTION = """\
+ In this work, we create a Minangkabau-Indonesian (MIN-ID) parallel corpus from Wikipedia. We obtain 224,180 Minangkabau and
+ 510,258 Indonesian articles, and align documents through title matching, resulting in 111,430 MIN-ID document pairs.
+ After that, we perform sentence segmentation based on simple punctuation heuristics and obtain 4,323,315 Minangkabau sentences. We
+ then use a bilingual dictionary to translate the Minangkabau articles (MIN) into Indonesian (ID'). Sentence alignment is conducted using the
+ ROUGE-1 (F1) score (unigram overlap) (Lin, 2004) between ID' and ID, and we pair each MIN sentence with an ID sentence based on the highest ROUGE-1 score.
+ We then discard sentence pairs with a score of less than 0.5, resulting in 345,146 MIN-ID parallel sentences.
+ We observe that the sentence patterns in the collection are highly repetitive (e.g. 100k sentences are about biological term definitions). Therefore,
+ we conduct a final filtering based on the top-1000 trigrams, iteratively discarding sentences until the frequency of each trigram equals 100. Finally, we
+ obtain 16,371 MIN-ID parallel sentences and conduct a manual evaluation by asking two native Minangkabau speakers to assess adequacy and
+ fluency (Koehn and Monz, 2006). The human judgement uses a 1-5 scale (1 means poor quality, 5 means excellent quality) and is conducted on 100 random
+ samples. We average the scores of the two annotators before computing the overall score, obtaining 4.98 for adequacy and 4.87 for fluency.
+ This indicates that the resulting corpus is of high quality for machine translation training.
+ """
+
+ _HOMEPAGE = "https://github.com/fajri91/minangNLP"
+ _LICENSE = "MIT"
+ _URLS = {
+     _DATASETNAME: "https://github.com/fajri91/minangNLP/archive/refs/heads/master.zip",
+ }
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+ # Dataset does not have versioning
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class MinangNLPmt(datasets.GeneratorBasedBuilder):
+     """A parallel corpus of 16,371 Minangkabau-Indonesian sentence pairs."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="minangnlp_mt_source",
+             version=SOURCE_VERSION,
+             description="MinangNLP Machine Translation source schema",
+             schema="source",
+             subset_id="minangnlp_mt",
+         ),
+         NusantaraConfig(
+             name="minangnlp_mt_nusantara_t2t",
+             version=NUSANTARA_VERSION,
+             description="MinangNLP Machine Translation Nusantara schema",
+             schema="nusantara_t2t",
+             subset_id="minangnlp_mt",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "minangnlp_mt_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "src": datasets.Value("string"),
+                     "tgt": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         urls = _URLS[_DATASETNAME]
+         data_dir = Path(dl_manager.download_and_extract(urls)) / "minangNLP-master" / "translation" / "wiki_data"
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "src_filepath": data_dir / "src_train.txt",
+                     "tgt_filepath": data_dir / "tgt_train.txt",
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "src_filepath": data_dir / "src_test.txt",
+                     "tgt_filepath": data_dir / "tgt_test.txt",
+                     "split": "test",
+                 },
+             ),
+             # Dataset has a secondary test split
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.TEST,
+             #     gen_kwargs={
+             #         "src_filepath": data_dir / "src_test_sent.txt",
+             #         "tgt_filepath": data_dir / "tgt_test_sent.txt",
+             #         "split": "test_sent",
+             #     },
+             # ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "src_filepath": data_dir / "src_dev.txt",
+                     "tgt_filepath": data_dir / "tgt_dev.txt",
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, src_filepath: Path, tgt_filepath: Path, split: str) -> Tuple[int, Dict]:
+         with open(src_filepath, encoding="utf-8") as fsrc, open(tgt_filepath, encoding="utf-8") as ftgt:
+             for idx, pair in enumerate(zip(fsrc, ftgt)):
+                 # Strip the trailing newline from each line so examples carry clean text.
+                 src, tgt = (line.rstrip("\n") for line in pair)
+                 if self.config.schema == "source":
+                     row = {
+                         "id": str(idx),
+                         "src": src,
+                         "tgt": tgt,
+                     }
+                     yield idx, row
+
+                 elif self.config.schema == "nusantara_t2t":
+                     row = {
+                         "id": str(idx),
+                         "text_1": src,
+                         "text_2": tgt,
+                         "text_1_name": "min",
+                         "text_2_name": "id",
+                     }
+                     yield idx, row
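
The sentence alignment described in _DESCRIPTION scores each dictionary-translated sentence (ID') against candidate Indonesian sentences (ID) with ROUGE-1 F1, i.e. unigram overlap, keeping a pair only when the best score reaches 0.5. The loader itself does not re-run this alignment; the following is only a minimal sketch of that metric, assuming lowercased whitespace tokenization:

from collections import Counter
from typing import List, Tuple

def rouge1_f1(candidate: str, reference: str) -> float:
    """ROUGE-1 F1: harmonic mean of unigram precision and recall."""
    cand = Counter(candidate.lower().split())
    ref = Counter(reference.lower().split())
    overlap = sum((cand & ref).values())  # clipped unigram matches
    if overlap == 0:
        return 0.0
    precision = overlap / sum(cand.values())
    recall = overlap / sum(ref.values())
    return 2 * precision * recall / (precision + recall)

# Pair a translated sentence (ID') with its best ID candidate; per the paper,
# pairs scoring below 0.5 would then be discarded.
def align(id_prime: str, id_sentences: List[str]) -> Tuple[float, str]:
    return max((rouge1_f1(id_prime, s), s) for s in id_sentences)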
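The final corpus filtering is likewise described only in prose: sentences are discarded until no top-1000 trigram occurs more than 100 times. Below is a rough sketch of one way to implement such a cap; the single-pass greedy order and the source-side-only counting are assumptions, not the authors' exact procedure:

from collections import Counter
from typing import List, Tuple

def trigrams(sentence: str) -> List[Tuple[str, ...]]:
    toks = sentence.split()
    return [tuple(toks[i:i + 3]) for i in range(len(toks) - 2)]

def cap_trigram_frequency(pairs, top_k=1000, cap=100):
    """Greedily drop sentence pairs whose source side contains an over-frequent trigram."""
    counts = Counter(t for src, _ in pairs for t in trigrams(src))
    frequent = {t for t, _ in counts.most_common(top_k)}
    kept = []
    for src, tgt in pairs:
        ts = trigrams(src)
        if any(t in frequent and counts[t] > cap for t in ts):
            for t in ts:
                counts[t] -= 1  # the pair is dropped, so its trigrams leave the tally
        else:
            kept.append((src, tgt))
    return kept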
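Finally, a quick usage sketch for the loader itself. The config names come from BUILDER_CONFIGS above; the local script path and an installed nusacrowd are assumptions about your environment, and recent datasets releases additionally require trust_remote_code=True for script-based loaders:

import datasets

# Assumes this file is saved locally as minangnlp_mt.py and nusacrowd is installed.
dset = datasets.load_dataset("minangnlp_mt.py", name="minangnlp_mt_source")
print(dset["train"][0])       # {'id': '0', 'src': <Minangkabau>, 'tgt': <Indonesian>}

t2t = datasets.load_dataset("minangnlp_mt.py", name="minangnlp_mt_nusantara_t2t")
print(t2t["validation"][0])   # text_1/text_2, labeled 'min' and 'id'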