Datasets: capes
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed:
- README.md +2 -1
- capes.py +18 -21
- dataset_infos.json +1 -1
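
This appears to be one of the batch commits from the datasets 1.16.0 release that migrated dataset scripts onto the streaming-friendly download API (download + iter_archive instead of download_and_extract). Once applied, the corpus can be read without extracting the archive locally; a minimal usage sketch, assuming the script is published on the Hub as the capes dataset:

    from datasets import load_dataset

    # Stream the train split; nothing is extracted to disk up front.
    ds = load_dataset("capes", split="train", streaming=True)

    for example in ds:
        print(example["translation"])  # {"en": "...", "pt": "..."}
        break
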
README.md
CHANGED
@@ -19,9 +19,10 @@ task_categories:
 task_ids:
 - machine-translation
 paperswithcode_id: capes
+pretty_name: CAPES
 ---
 
-# Dataset Card for
+# Dataset Card for CAPES
 
 ## Table of Contents
 - [Dataset Description](#dataset-description)
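
The YAML header above is the dataset card metadata that the Hub parses; the added pretty_name is the human-readable display name. As an illustration only (using today's huggingface_hub client, which is not part of this commit), the field can be read programmatically:

    from huggingface_hub import DatasetCard

    # Loads README.md from the Hub dataset repo and parses its YAML header.
    card = DatasetCard.load("capes")
    print(card.data.pretty_name)  # CAPES
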
capes.py
CHANGED
@@ -15,8 +15,6 @@
 """Capes: Parallel corpus of theses and dissertation abstracts in Portuguese and English from CAPES"""
 
 
-import os
-
 import datasets
 
 
@@ -71,31 +69,30 @@ class Capes(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(_URL)
+        archive = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "source_file": os.path.join(data_dir, "en_pt.en"),
-                    "target_file": os.path.join(data_dir, "en_pt.pt"),
+                    "source_file": "en_pt.en",
+                    "target_file": "en_pt.pt",
+                    "src_files": dl_manager.iter_archive(archive),
+                    "tgt_files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, source_file, target_file):
-        with open(source_file, encoding="utf-8") as f:
-            source_sentences = f.read().split("\n")
-        with open(target_file, encoding="utf-8") as f:
-            target_sentences = f.read().split("\n")
-
-        assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
-            len(source_sentences),
-            len(target_sentences),
-            source_file,
-            target_file,
-        )
-
+    def _generate_examples(self, source_file, target_file, src_files, tgt_files):
         source, target = tuple(self.config.name.split("-"))
-        for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
-            result = {"translation": {source: l1, target: l2}}
-            yield idx, result
+        for src_path, src_f in src_files:
+            if src_path == source_file:
+                for tgt_path, tgt_f in tgt_files:
+                    if tgt_path == target_file:
+                        for idx, (l1, l2) in enumerate(zip(src_f, tgt_f)):
+                            l1 = l1.decode("utf-8").strip()
+                            l2 = l2.decode("utf-8").strip()
+                            if l1 and l2:
+                                result = {"translation": {source: l1, target: l2}}
+                                yield idx, result
+                        break
+                break
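
The rewritten _generate_examples relies on dl_manager.iter_archive, which walks the downloaded tarball sequentially and yields (path inside archive, binary file object) pairs; that is why the script passes two independent iterators over the same archive, one to locate en_pt.en and one to locate en_pt.pt. A rough sketch of that behavior using the standard tarfile module (illustrative only, not the library's actual implementation):

    import tarfile
    from typing import BinaryIO, Iterator, Tuple

    def iter_archive(archive_path: str) -> Iterator[Tuple[str, BinaryIO]]:
        # Yield (member name, file object) pairs one at a time,
        # without extracting anything to disk.
        with tarfile.open(archive_path) as tar:
            for member in tar:
                if member.isfile():
                    yield member.name, tar.extractfile(member)

Reading the archive as a stream of members instead of extracted paths is what lets the same generator code run in streaming mode, where no local extraction directory exists.
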
dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"en-pt": {"description": "A parallel corpus of theses and dissertations abstracts in English and Portuguese were collected from the CAPES website (Coordena\u00e7\u00e3o de Aperfei\u00e7oamento de Pessoal de N\u00edvel Superior) - Brazil. The corpus is sentence aligned for all language pairs. Approximately 240,000 documents were collected and aligned using the Hunalign algorithm.\n", "citation": "@inproceedings{soares2018parallel,\n title={A Parallel Corpus of Theses and Dissertations Abstracts},\n author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose},\n booktitle={International Conference on Computational Processing of the Portuguese Language},\n pages={345--352},\n year={2018},\n organization={Springer}\n}\n", "homepage": "https://sites.google.com/view/felipe-soares/datasets#h.p_kxOR6EhHm2a6", "license": "", "features": {"translation": {"languages": ["en", "pt"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "builder_name": "capes", "config_name": "en-pt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 472484364, "num_examples": 1157610, "dataset_name": "capes"}}, "download_checksums": {"https://ndownloader.figstatic.com/files/14015837": {"num_bytes": 162229298, "checksum": "08e5739e78cd5b68ca6b29507f2a746fd3a5fbdec8dde2700a4141030d21e143"}}, "download_size": 162229298, "post_processing_size": null, "dataset_size": 472484364, "size_in_bytes": 634713662}}
+{"en-pt": {"description": "A parallel corpus of theses and dissertations abstracts in English and Portuguese were collected from the CAPES website (Coordena\u00e7\u00e3o de Aperfei\u00e7oamento de Pessoal de N\u00edvel Superior) - Brazil. The corpus is sentence aligned for all language pairs. Approximately 240,000 documents were collected and aligned using the Hunalign algorithm.\n", "citation": "@inproceedings{soares2018parallel,\n title={A Parallel Corpus of Theses and Dissertations Abstracts},\n author={Soares, Felipe and Yamashita, Gabrielli Harumi and Anzanello, Michel Jose},\n booktitle={International Conference on Computational Processing of the Portuguese Language},\n pages={345--352},\n year={2018},\n organization={Springer}\n}\n", "homepage": "https://sites.google.com/view/felipe-soares/datasets#h.p_kxOR6EhHm2a6", "license": "", "features": {"translation": {"languages": ["en", "pt"], "id": null, "_type": "Translation"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "capes", "config_name": "en-pt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 472484364, "num_examples": 1157610, "dataset_name": "capes"}}, "download_checksums": {"https://ndownloader.figstatic.com/files/14015837": {"num_bytes": 162229298, "checksum": "08e5739e78cd5b68ca6b29507f2a746fd3a5fbdec8dde2700a4141030d21e143"}}, "download_size": 162229298, "post_processing_size": null, "dataset_size": 472484364, "size_in_bytes": 634713662}}
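
Beyond the added "task_templates": null field, the infos file keeps the download size and sha256 checksum that the library uses to verify the archive. A quick manual check of a local copy against those recorded values (the file path below is hypothetical; the checksum and size are the ones from the file above):

    import hashlib

    # Values recorded in dataset_infos.json above.
    EXPECTED_SHA256 = "08e5739e78cd5b68ca6b29507f2a746fd3a5fbdec8dde2700a4141030d21e143"
    EXPECTED_SIZE = 162229298

    def verify(path: str) -> bool:
        # Hash in 1 MiB chunks so memory stays flat for large archives.
        h, size = hashlib.sha256(), 0
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
                size += len(chunk)
        return h.hexdigest() == EXPECTED_SHA256 and size == EXPECTED_SIZE

    print(verify("capes_archive.tar.gz"))  # hypothetical local filename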