# coding=utf-8
"""Multi-domain German-English parallel dataset for Domain Adapted Machine Translation."""

from pathlib import Path

import datasets
import gdown

_CITATION = """\
@inproceedings{koehn-knowles-2017-six,
    title = "Six Challenges for Neural Machine Translation",
    author = "Koehn, Philipp and Knowles, Rebecca",
    booktitle = "Proceedings of the First Workshop on Neural Machine Translation",
    month = aug,
    year = "2017",
    address = "Vancouver",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-3204",
    doi = "10.18653/v1/W17-3204",
    pages = "28--39",
}
@inproceedings{aharoni2020unsupervised,
    title = "Unsupervised Domain Clusters in Pretrained Language Models",
    author = "Aharoni, Roee and Goldberg, Yoav",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    year = "2020",
    url = "https://arxiv.org/abs/2004.02105",
    publisher = "Association for Computational Linguistics",
}
"""

_URL = "https://drive.google.com/file/d/1yvB-pvlojtT2UpOX1JvwtD6rw9joQ49A/view"

_HOMEPAGE = "https://github.com/roeeaharoni/unsupervised-domain-clusters"

_DOMAIN = ["it", "koran", "law", "medical", "subtitles"]


class DAMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the DAMT dataset."""

    def __init__(self, domain=None, **kwargs):
        """
        Args:
            domain: domain name; must be one of _DOMAIN.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            name=domain,
            description="Multi-domain German-English parallel dataset for Domain Adapted Machine Translation.",
            version=datasets.Version("1.0.0", ""),
            **kwargs,
        )
        # Validate the domain name before storing it on the config.
        assert domain in _DOMAIN, f"Unknown domain {domain!r}; expected one of {_DOMAIN}"
        self.domain = domain


class DAMT(datasets.GeneratorBasedBuilder):
    """Multi-domain German-English parallel dataset for Domain Adapted Machine Translation."""

    # One config per domain, so users select e.g. the "medical" or "law" subset by name.
    BUILDER_CONFIGS = [DAMTConfig(domain=d) for d in _DOMAIN]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description="Multi-domain German-English parallel dataset for Domain Adapted Machine Translation.",
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=("en", "de"))}
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, validation, and test splits."""
        domain = self.config.domain

        def _get_drive_url(url):
            # Turn a Google Drive "file/d/<id>/view" share link into a direct-download URL.
            return f"https://drive.google.com/uc?id={url.split('/')[5]}"

        # Download the archive via gdown into the download manager's cache,
        # reusing a previously downloaded copy when one exists.
        cache_dir = dl_manager.download_config.cache_dir
        assert Path(cache_dir).is_dir(), cache_dir
        output = Path(cache_dir) / "multi_domain_new_split.zip"
        if not output.exists():
            dl_dir = gdown.download(_get_drive_url(_URL), output.as_posix(), quiet=True)
        else:
            dl_dir = output.as_posix()
        ex_dir = dl_manager.extract(dl_dir)
        assert Path(ex_dir).is_dir(), ex_dir

        # Each domain directory contains line-aligned train/dev/test files
        # for English (.en) and German (.de).
        files = {
            "train": {
                "en_file": f"{ex_dir}/{domain}/train.en",
                "de_file": f"{ex_dir}/{domain}/train.de",
            },
            "validation": {
                "en_file": f"{ex_dir}/{domain}/dev.en",
                "de_file": f"{ex_dir}/{domain}/dev.de",
            },
            "test": {
                "en_file": f"{ex_dir}/{domain}/test.en",
                "de_file": f"{ex_dir}/{domain}/test.de",
            },
        }
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["validation"]),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
        ]

    def _generate_examples(self, en_file, de_file):
        """Yields (id, example) pairs from aligned English and German files."""
        with open(en_file, "r", encoding="utf-8") as en_f, open(de_file, "r", encoding="utf-8") as de_f:
            # The two files are line-aligned, so zipping them pairs each
            # English sentence with its German translation.
            for id_, (en, de) in enumerate(zip(en_f, de_f)):
                yield id_, {"translation": {"en": en.strip(), "de": de.strip()}}
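

# A minimal smoke test, assuming this script is saved locally as "damt.py";
# the filename and the "medical" config are illustrative choices, not part of
# the original script, and running it requires a `datasets` version that
# still supports script-based loading.
if __name__ == "__main__":
    dataset = datasets.load_dataset("damt.py", "medical", split="validation")
    print(dataset[0]["translation"])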