Datasets:

ArXiv:
License:
holylovenia committed on
Commit
8bbddd9
1 Parent(s): ac5dfbb

Upload beaye_lexicon.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. beaye_lexicon.py +115 -0
beaye_lexicon.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from seacrowd.utils.configs import SEACrowdConfig
8
+ from seacrowd.utils.constants import Licenses
9
+
10
+ _CITATION = """\
11
+ @misc{lopo2024constructing,
12
+ title={Constructing and Expanding Low-Resource and Underrepresented Parallel Datasets for Indonesian Local Languages},
13
+ author={Joanito Agili Lopo and Radius Tanone},
14
+ year={2024},
15
+ eprint={2404.01009},
16
+ archivePrefix={arXiv},
17
+ primaryClass={cs.CL}
18
+ }
19
+ """
20
+
21
+ _DATASETNAME = "beaye_lexicon"
22
+ _DESCRIPTION = """The Beaye Lexicon is a lexicon resource encompassing translations between Indonesian, English, and
23
+ Beaye words. Developed through a collaborative effort involving two native Beaye speakers and evaluated by linguistic
24
+ experts, this lexicon comprises 984 Beaye vocabularies. The creation of the Beaye Lexicon marks the inaugural effort in
25
+ documenting the previously unrecorded Beaye language."""
26
+
27
+ _HOMEPAGE = "https://github.com/joanitolopo/bhinneka-korpus/tree/main/lexicon"
28
+ _LICENSE = Licenses.APACHE_2_0.value
29
+ _URLS = "https://raw.githubusercontent.com/joanitolopo/bhinneka-korpus/main/lexicon"
30
+ _SUPPORTED_TASKS = []
31
+ _SOURCE_VERSION = "1.0.0"
32
+ _SEACROWD_VERSION = "2024.06.20"
33
+ _LOCAL = False
34
+
35
+ _LANGUAGES = ["ind", "day", "eng"]
36
+
37
class BeayeLexicon(datasets.GeneratorBasedBuilder):
    """Beaye Lexicon is a lexicon resource encompassing translations between Indonesian, English, and Beaye words"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # Base configs (lexicon.xlsx) cover the ind/day columns; "ext" configs
    # (english.xlsx) cover the day/eng columns of the extended lexicon.
    BUILDER_CONFIGS = (
        [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_{lang}_source",
                version=datasets.Version(_SOURCE_VERSION),
                description=f"beaye lexicon with source schema for {lang} language",
                schema="source",
                subset_id="beaye_lexicon",
            )
            for lang in _LANGUAGES
            if lang != "eng"
        ]
        + [
            SEACrowdConfig(
                name=f"{_DATASETNAME}_ext_{lang}_source",
                version=datasets.Version(_SOURCE_VERSION),
                # Fixed: was an f-string with no placeholders and a typo
                # ("definiton").
                description="beaye lexicon with source schema for extensive definition of beaye language",
                schema="source",
                subset_id="beaye_lexicon",
            )
            for lang in _LANGUAGES
            if lang != "ind"
        ]
    )

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_ind_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata.

        Only the plain "source" schema (id + word per row) is implemented;
        any other schema raises NotImplementedError.
        """
        if self.config.schema != "source":
            raise NotImplementedError()
        features = datasets.Features({"id": datasets.Value("string"), "word": datasets.Value("string")})
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the relevant workbook and expose it as a single train split."""
        # "ext" configs read the extended English workbook; all other
        # configs read the base Indonesian/Beaye lexicon.
        filename = "english.xlsx" if "ext" in self.config.name.split("_") else "lexicon.xlsx"
        data_dir = Path(dl_manager.download(_URLS + "/" + filename))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples from one language column."""
        # Validate the schema before the (comparatively expensive) Excel
        # parse so misconfigured builds fail fast.
        if self.config.schema != "source":
            raise ValueError(f"Invalid config: {self.config.name}")

        # Config names are "beaye_lexicon_<lang>_source" or
        # "beaye_lexicon_ext_<lang>_source"; the language code sits one
        # position later when the "ext" marker is present.
        parts = self.config.name.split("_")
        lang = parts[3] if "ext" in parts else parts[2]

        dfs = pd.read_excel(filepath, engine="openpyxl")
        for idx, word in enumerate(dfs[lang].values):
            yield idx, {"id": str(idx), "word": word}