Update IN22-Conv.py
#2
by akash323
IN22-Conv.py CHANGED (+29 -36)
@@ -1,5 +1,5 @@
 # coding=utf-8
-"""The IN-22
+"""The IN-22 Gen Evaluation Benchmark for evaluation of Machine Translation for Indic Languages."""

 import os
 import sys
@@ -19,8 +19,7 @@ _CITATION = """

 _DESCRIPTION = """\
 IN-22 is a newly created comprehensive benchmark for evaluating machine translation performance in multi-domain, n-way parallel contexts across 22 Indic languages.
-IN22-
-Currently, we use it for sentence-level evaluation of MT systems but can be repurposed for document translation evaluation as well.
+IN22-Gen is a general-purpose multi-domain evaluation subset of IN22. It has been created from two sources: Wikipedia and Web Sources offering diverse content spanning news, entertainment, culture, legal, and India-centric topics.
 """

 _HOMEPAGE = "https://github.com/AI4Bharat/IndicTrans2"
@@ -40,7 +39,7 @@ _LANGUAGES = [

 _URL = "https://indictrans2-public.objectstore.e2enetworks.net/IN22_benchmark.tar.gz"

-_SPLITS = ["conv"]
+_SPLITS = ["gen"]

 _SENTENCES_PATHS = {
     lang: {
@@ -64,8 +63,8 @@ def _pairings(iterable, r=2):
         yield p


-class IN22ConvConfig(datasets.BuilderConfig):
-    """BuilderConfig for the IN-22
+class IN22GenConfig(datasets.BuilderConfig):
+    """BuilderConfig for the IN-22 Gen evaluation subset."""
     def __init__(self, lang: str, lang2: str = None, **kwargs):
         """
         Args:
@@ -76,24 +75,24 @@ class IN22ConvConfig(datasets.BuilderConfig):
         self.lang2 = lang2


-class IN22Conv(datasets.GeneratorBasedBuilder):
-    """IN-22
+class IN22Gen(datasets.GeneratorBasedBuilder):
+    """IN-22 Gen evaluation subset."""

     BUILDER_CONFIGS = [
-        IN22ConvConfig(
+        IN22GenConfig(
             name=lang,
             description=f"IN-22: {lang} subset.",
             lang=lang
         )
         for lang in _LANGUAGES
     ] + [
-        IN22ConvConfig(
+        IN22GenConfig(
             name="all",
             description=f"IN-22: all language pairs",
             lang=None
         )
     ] + [
-        IN22ConvConfig(
+        IN22GenConfig(
             name=f"{l1}-{l2}",
             description=f"IN-22: {l1}-{l2} aligned subset.",
             lang=l1,
@@ -104,14 +103,12 @@ class IN22Conv(datasets.GeneratorBasedBuilder):
     def _info(self):
         features = {
             "id": datasets.Value("int32"),
-            "
-            "
-            "
+            "context": datasets.Value("string"),
+            "source": datasets.Value("string"),
+            "url": datasets.Value("string"),
             "domain": datasets.Value("string"),
-            "
-            "
-            "speaker": datasets.Value("int32"),
-            "turn": datasets.Value("int32")
+            "num_words": datasets.Value("int32"),
+            "bucket": datasets.Value("string")
         }
         if self.config.name != "all" and "-" not in self.config.name:
             features["sentence"] = datasets.Value("string")
@@ -153,8 +150,8 @@ class IN22Conv(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, sentence_paths: Union[str, List[str]], metadata_path: str, langs: Optional[List[str]] = None):
         """Yields examples as (key, example) tuples."""
         if isinstance(sentence_paths, str):
-            with open(sentence_paths, "r") as sentences_file:
-                with open(metadata_path, "r") as metadata_file:
+            with open(sentence_paths, "r", encoding="utf-8") as sentences_file:
+                with open(metadata_path, "r", encoding="utf-8") as metadata_file:
                     metadata_lines = [l.strip() for l in metadata_file.readlines()[1:]]
                     for id_, (sentence, metadata) in enumerate(
                         zip(sentences_file, metadata_lines)
@@ -164,14 +161,12 @@ class IN22Conv(datasets.GeneratorBasedBuilder):
                         yield id_, {
                             "id": id_ + 1,
                             "sentence": sentence,
-                            "
-                            "
-                            "
+                            "context": metadata[0],
+                            "source": metadata[1],
+                            "url": metadata[2],
                             "domain": metadata[3],
-                            "
-                            "
-                            "speaker": metadata[6],
-                            "turn": metadata[7]
+                            "num_words": metadata[4],
+                            "bucket": metadata[5]
                         }
         else:
             sentences = {}
@@ -180,23 +175,21 @@ class IN22Conv(datasets.GeneratorBasedBuilder):
             else:
                 langs = [self.config.lang, self.config.lang2]
             for path, lang in zip(sentence_paths, langs):
-                with open(path, "r") as sent_file:
+                with open(path, "r", encoding="utf-8") as sent_file:
                     sentences[lang] = [l.strip() for l in sent_file.readlines()]
-            with open(metadata_path, "r") as metadata_file:
+            with open(metadata_path, "r", encoding="utf-8") as metadata_file:
                 metadata_lines = [l.strip() for l in metadata_file.readlines()[1:]]
             for id_, metadata in enumerate(metadata_lines):
                 metadata = metadata.split("\t")
                 yield id_, {
                     **{
                         "id": id_ + 1,
-                        "
-                        "
-                        "
+                        "context": metadata[0],
+                        "source": metadata[1],
+                        "url": metadata[2],
                         "domain": metadata[3],
-                        "
-                        "
-                        "speaker": metadata[6],
-                        "turn": metadata[7]
+                        "num_words": metadata[4],
+                        "bucket": metadata[5]
                     }, **{
                         f"sentence_{lang}": sentences[lang][id_]
                         for lang in langs