Punchwe committed on
Commit
38768cf
·
verified ·
1 Parent(s): b328140

Upload tatoeba-challenge.py

Browse files
Files changed (1) hide show
  1. tatoeba-challenge.py +142 -0
tatoeba-challenge.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import gzip

import datasets

# BibTeX entry for the paper introducing the Tatoeba Translation Challenge.
_CITATION = """\
@inproceedings{tiedemann-2020-tatoeba,
    title = "The {T}atoeba {T}ranslation {C}hallenge {--} {R}ealistic Data Sets for Low Resource and Multilingual {MT}",
    author = {Tiedemann, J{\"o}rg},
    booktitle = "Proceedings of the Fifth Conference on Machine Translation",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.wmt-1.139",
    pages = "1174--1182"
}
"""

_DESCRIPTION = """\
The Tatoeba Translation Challenge is a multilingual data set of
machine translation benchmarks derived from user-contributed
translations collected by [Tatoeba.org](https://tatoeba.org/) and
provided as parallel corpus from [OPUS](https://opus.nlpl.eu/). This
dataset includes test and development data sorted by language pair. It
includes test sets for hundreds of language pairs and is continuously
updated. Please, check the version number tag to refer to the release
that you are using.
"""

# Release version "YYYY.MM.DD"; _DATE turns it into the "vYYYY-MM-DD" tag
# used both in the download URL and in the extracted release folder name.
_VERSION = "2023.09.26"
_DATE = "v" + "-".join(s.zfill(2) for s in _VERSION.split("."))
# Format slots: release date tag, source language, target language.
_BASE_URL = "https://object.pouta.csc.fi/Tatoeba-Challenge-{}/{}-{}.tar"


# Language pairs exposed as builder configs (ISO 639-3 codes).
_LANGUAGE_PAIRS = [
    ("eng", "fra"),
    ("deu", "eng"),
    ("eng", "nld"),
    ("deu", "nld"),
    ("fra", "nld"),
    ("deu", "fra"),
]
44
+
45
+
46
class TatoebaConfig(datasets.BuilderConfig):
    """BuilderConfig for one Tatoeba Challenge language pair.

    The builder name is derived from the two language codes as
    ``"<lang1>-<lang2>"``; ``date`` selects which challenge release
    to download (defaults to the module-level ``_DATE`` tag).
    """

    def __init__(self, *args, lang1=None, lang2=None, date=_DATE, **kwargs):
        super().__init__(*args, name=f"{lang1}-{lang2}", **kwargs)
        self.lang1 = lang1
        self.lang2 = lang2
        self.date = date
56
+
57
+
58
class Tatoeba(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Tatoeba Translation Challenge benchmarks.

    One builder config per language pair in ``_LANGUAGE_PAIRS``; each split
    is read from the extracted release tarball for the configured date.
    """

    BUILDER_CONFIGS = [
        TatoebaConfig(
            lang1=lang1,
            lang2=lang2,
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version(_VERSION),
        )
        for lang1, lang2 in _LANGUAGE_PAIRS
    ]
    BUILDER_CONFIG_CLASS = TatoebaConfig

    # Upper bound on training pairs yielded per language pair.
    _MAX_TRAIN_EXAMPLES = 3_000_000

    def _info(self):
        """Declare features: a string id plus a two-language Translation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "translation": datasets.Translation(
                        languages=(self.config.lang1, self.config.lang2)
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the release tarball and define train/dev/test splits."""
        download_url = _BASE_URL.format(
            self.config.date, self.config.lang1, self.config.lang2
        )
        path = dl_manager.download_and_extract(download_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"datapath": path, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"datapath": path, "split": "test"},
            ),
        ]

    def _generate_examples(self, datapath, split):
        """Yield ``(key, example)`` pairs for the requested split.

        Train data ships as gzip-compressed ``train.src.gz``/``train.trg.gz``;
        dev/test are plain-text ``.src``/``.trg`` files. All files are opened
        in text mode so yielded values are ``str``, matching the declared
        ``Value("string")``/``Translation`` features (the previous version
        opened the gzip files in binary mode and yielded ``bytes``).
        """
        l1, l2 = self.config.lang1, self.config.lang2
        # Use the configured release date — not the module-level _DATE — so a
        # non-default ``date`` finds the folder matching the downloaded tar.
        folder = os.path.join(
            datapath, "data", "release", self.config.date, f"{l1}-{l2}"
        )

        if "train" in split:
            src_path = os.path.join(folder, "train.src.gz")
            trg_path = os.path.join(folder, "train.trg.gz")
            # "rt" decodes to str; plain "r" on gzip files means binary.
            with gzip.open(src_path, "rt", encoding="utf-8") as f1, gzip.open(
                trg_path, "rt", encoding="utf-8"
            ) as f2:
                for idx, (x, y) in enumerate(zip(f1, f2)):
                    if idx >= self._MAX_TRAIN_EXAMPLES:
                        break
                    yield idx, {
                        "id": str(idx),
                        "translation": {l1: x.strip(), l2: y.strip()},
                    }
        else:
            stem = "test" if "test" in split else "dev"
            src_path = os.path.join(folder, f"{stem}.src")
            trg_path = os.path.join(folder, f"{stem}.trg")
            with open(src_path, encoding="utf-8") as f1, open(
                trg_path, encoding="utf-8"
            ) as f2:
                for idx, (x, y) in enumerate(zip(f1, f2)):
                    yield idx, {
                        "id": str(idx),
                        "translation": {l1: x.strip(), l2: y.strip()},
                    }