holylovenia committed
Commit d026897 · verified · 1 parent: 1f88df0

Upload brcc.py with huggingface_hub

Files changed (1)
  1. brcc.py +109 -0
brcc.py ADDED
@@ -0,0 +1,109 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """
+ @inproceedings{romadhona-etal-2022-brcc,
+     author    = {Romadhona, Nanda Putri and Lu, Sin-En and Lu, Bo-Han and Tsai, Richard Tzong-Han},
+     title     = {BRCC and SentiBahasaRojak: The First Bahasa Rojak Corpus for Pretraining and Sentiment Analysis Dataset},
+     booktitle = {Proceedings of the 29th International Conference on Computational Linguistics},
+     publisher = {International Committee on Computational Linguistics},
+     year      = {2022},
+     url       = {https://aclanthology.org/2022.coling-1.389/},
+     pages     = {4418--4428},
+ }
+ """
+
+ _LOCAL = False
+ _LANGUAGES = ["zlm", "eng", "cmn"]
+ _DATASETNAME = "brcc"
+ _DESCRIPTION = """
+ The Bahasa Rojak Crawled Corpus (BRCC) is a code-mixed dataset for the Bahasa Rojak dialect in Malaysia.
+ Passages are generated through data augmentation from English and Malay Wikipedia pages using a modified CoSDA-ML method.
+ The quality of generated passages is evaluated by two native Malay speakers.
+ """
+ _HOMEPAGE = "https://data.depositar.io/dataset/brcc_and_sentibahasarojak"
+ _LICENSE = Licenses.UNKNOWN.value
+ _URL = "https://data.depositar.io/dataset/304d1572-27d6-4549-8292-b1c8f5e9c086/resource/8a558f64-98ff-4922-a751-0ce2ce8447bd/download/BahasaRojak_Datasets.zip"
+
+ _SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class BRCCDataset(datasets.GeneratorBasedBuilder):
+     """Dataset of Bahasa Rojak passages generated from English and Malay Wikipedia pages."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=_DATASETNAME,
+         ),
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_seacrowd_ssp",
+             version=SEACROWD_VERSION,
+             description=f"{_DATASETNAME} SEACrowd ssp schema",
+             schema="seacrowd_ssp",
+             subset_id=_DATASETNAME,
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         # Source schema = SeaCrowd schema because the file only contains lines of text
+         if self.config.schema in ("source", "seacrowd_ssp"):
+             features = schemas.ssp_features
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         data_dir = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "BahasaRojak Datasets", "BRCC", "mix.train"),
+                     "split": "train",
+                 },
+             )
+         ]
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as f:
+             for idx, line in enumerate(f):
+                 example = {"id": str(idx), "text": line.strip()}
+                 yield idx, example
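
For reference, a minimal loading sketch (not part of the commit): it assumes `datasets` and the `seacrowd` package are installed and that this script is saved locally as brcc.py; the config names come from BUILDER_CONFIGS above.

import datasets

# Load the train split through the script-based loader; recent versions of
# `datasets` require trust_remote_code=True to execute a dataset script.
dset = datasets.load_dataset(
    "brcc.py",
    name="brcc_seacrowd_ssp",  # or "brcc_source"; both map to the ssp features
    split="train",
    trust_remote_code=True,
)

# Each record follows the ssp schema yielded by _generate_examples,
# i.e. {"id": "0", "text": "..."} with one code-mixed passage per line of mix.train.
print(dset[0])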