Committed by holylovenia · commit 404c3b8 (verified) · 1 parent: 0f82129

Upload cebuaner.py with huggingface_hub

Files changed (1)
  1. cebuaner.py +193 -0
cebuaner.py ADDED
@@ -0,0 +1,193 @@
from pathlib import Path
from typing import Dict, Iterable, List, Tuple

import datasets
from datasets.download.download_manager import DownloadManager

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = r"""
@misc{pilar2023cebuaner,
  title={CebuaNER: A New Baseline Cebuano Named Entity Recognition Model},
  author={Ma. Beatrice Emanuela Pilar and Ellyza Mari Papas and Mary Loise Buenaventura and Dane Dedoroy and Myron Darrel Montefalcon and Jay Rhald Padilla and Lany Maceda and Mideth Abisado and Joseph Marvin Imperial},
  year={2023},
  eprint={2310.00679},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
"""

_LOCAL = False
_LANGUAGES = ["ceb"]
_DATASETNAME = "cebuaner"
_DESCRIPTION = """\
The CebuaNER dataset contains 4000+ news articles that have been tagged by
native speakers of Cebuano using the BIO encoding scheme for the named entity
recognition (NER) task.
"""

_HOMEPAGE = "https://github.com/mebzmoren/CebuaNER"
_LICENSE = Licenses.CC_BY_NC_SA_4_0.value
_URLS = {
    "annotator_1": "https://github.com/mebzmoren/CebuaNER/raw/main/data/annotated_data/final-1.txt",
    "annotator_2": "https://github.com/mebzmoren/CebuaNER/raw/main/data/annotated_data/final-2.txt",
}

# The alignment between annotators is high, and both can be used as gold-standard data.
# Hence, we use the first annotator as the default.
_DEFAULT_ANNOTATOR = "annotator_1"

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"


class CebuaNERDataset(datasets.GeneratorBasedBuilder):
    """CebuaNER dataset from https://github.com/mebzmoren/CebuaNER"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    SEACROWD_SCHEMA_NAME = "seq_label"
    LABEL_CLASSES = [
        "O",
        "B-PER",
        "I-PER",
        "B-ORG",
        "I-ORG",
        "B-LOC",
        "I-LOC",
        "B-OTHER",
        "I-OTHER",
    ]

    # There are two annotators in the CebuaNER dataset, but there's no canonical
    # label. Here, we decided to create loaders for both annotators. The
    # inter-annotator reliability is high, so it's possible to treat either as
    # gold-standard data.
    dataset_names = sorted([f"{_DATASETNAME}_{annot}" for annot in _URLS.keys()])
    BUILDER_CONFIGS = []
    for name in dataset_names:
        source_config = SEACrowdConfig(
            name=f"{name}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=name,
        )
        BUILDER_CONFIGS.append(source_config)
        seacrowd_config = SEACrowdConfig(
            name=f"{name}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=name,
        )
        BUILDER_CONFIGS.append(seacrowd_config)

    # Create a configuration that loads the annotations of the first annotator
    # and treats it as the default.
    BUILDER_CONFIGS.extend([
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_{SEACROWD_SCHEMA_NAME}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=f"seacrowd_{SEACROWD_SCHEMA_NAME}",
            subset_id=_DATASETNAME,
        ),
    ])

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=self.LABEL_CLASSES)),
                }
            )
        elif self.config.schema == f"seacrowd_{self.SEACROWD_SCHEMA_NAME}":
            features = schemas.seq_label_features(self.LABEL_CLASSES)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
        if self.config.subset_id == _DATASETNAME:
            url = _URLS[_DEFAULT_ANNOTATOR]
        else:
            _, annotator = self.config.subset_id.split("_", 1)
            url = _URLS[annotator]
        data_file = Path(dl_manager.download_and_extract(url))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_file, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_file, "split": "dev"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_file, "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        label_key = "ner_tags" if self.config.schema == "source" else "labels"
        examples: Iterable[Dict[str, List[str]]] = []
        with open(filepath, encoding="utf-8") as f:
            tokens = []
            ner_tags = []
            for line in f:
                # There's no clear delimiter in the IOB file, so each example is separated on a blank line.
                # The -DOCSTART- delimiter only shows up in the very first example. In their notebook example,
                # https://github.com/mebzmoren/CebuaNER/blob/main/notebooks/Named-Entity-Recognition-with-Conditional-Random-Fields.ipynb,
                # they used '' as their article delimiter.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if tokens:
                        examples.append({"tokens": tokens, label_key: ner_tags})
                        if len(tokens) != len(ner_tags):
                            raise ValueError(f"Tokens and tags are not aligned! {len(tokens)} != {len(ner_tags)}")
                        tokens = []
                        ner_tags = []
                else:
                    # CebuaNER IOB columns are separated by spaces.
                    token, _, _, ner_tag = line.split(" ")
                    tokens.append(token)
                    ner_tags.append(ner_tag.rstrip())
            if tokens:
                examples.append({"tokens": tokens, label_key: ner_tags})
                if len(tokens) != len(ner_tags):
                    raise ValueError(f"Tokens and tags are not aligned! {len(tokens)} != {len(ner_tags)}")

        # The CebuaNER paper doesn't provide a recommended split. However, the GitHub repository
        # contains a notebook example of the split they used in the report:
        # https://github.com/mebzmoren/CebuaNER/blob/main/notebooks/Named-Entity-Recognition-with-Conditional-Random-Fields.ipynb
        if split == "train":
            final_examples = examples[0:2980]
        if split == "test":
            final_examples = examples[2980:3831]
        if split == "dev":
            final_examples = examples[3831:]

        for idx, eg in enumerate(final_examples):
            eg["id"] = idx
            yield idx, eg
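
For reference, below is a minimal usage sketch showing how the uploaded script could be loaded with the datasets library. It is not part of the commit and assumes that cebuaner.py sits in the working directory, that the seacrowd helper package it imports is installed, and that the config names follow the BUILDER_CONFIGS defined above; trust_remote_code=True may be required on recent datasets versions.

import datasets

# Default config: source schema, annotations from annotator 1.
ds = datasets.load_dataset("cebuaner.py", name="cebuaner_source", trust_remote_code=True)
print(ds["train"][0]["tokens"][:5])
print(ds["train"][0]["ner_tags"][:5])

# Annotator-specific config in the SEACrowd seq_label schema.
ds_a2 = datasets.load_dataset("cebuaner.py", name="cebuaner_annotator_2_seacrowd_seq_label", trust_remote_code=True)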