Languages: Vietnamese
holylovenia committed (verified) · commit a45d0b4 · 1 parent: bd18428

Upload vlsp2016_ner.py with huggingface_hub

Files changed (1):
  1. vlsp2016_ner.py +164 -0
vlsp2016_ner.py ADDED
@@ -0,0 +1,164 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This dataset is collected from electronic newspapers published on the web and provided by the VLSP organization.
It consists of approximately 15k sentences, each of which contains NE information in the IOB annotation format.
"""
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@article{nguyen-et-al-2019-vlsp-ner,
    author = {Nguyen, Huyen and Ngo, Quyen and Vu, Luong and Mai, Vu and Nguyen, Hien},
    year = {2019},
    month = {01},
    pages = {283-294},
    title = {VLSP Shared Task: Named Entity Recognition},
    volume = {34},
    journal = {Journal of Computer Science and Cybernetics},
    doi = {10.15625/1813-9663/34/4/13161}
}
"""

_DATASETNAME = "vlsp2016_ner"

_DESCRIPTION = """\
This dataset is collected from electronic newspapers published on the web and provided by the VLSP organization.
It consists of approximately 15k sentences, each of which contains NE information in the IOB annotation format.
"""

_HOMEPAGE = "https://huggingface.co/datasets/datnth1709/VLSP2016-NER-data"

_LANGUAGES = ["vie"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)

_LICENSE = Licenses.CC_BY_NC_4_0.value

_LOCAL = False

_URLS = {
    _DATASETNAME: {
        "train": "https://huggingface.co/datasets/datnth1709/VLSP2016-NER-data/resolve/main/data/train-00000-of-00001-b0417886a268b83a.parquet?download=true",
        "test": "https://huggingface.co/datasets/datnth1709/VLSP2016-NER-data/resolve/main/data/valid-00000-of-00001-846411c236133ba3.parquet?download=true",
    },
}

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class Vlsp2016NER(datasets.GeneratorBasedBuilder):
    """This dataset is collected from electronic newspapers published on the web and provided by the VLSP organization.
    It consists of approximately 15k sentences, each of which contains NE information in the IOB annotation format."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="vlsp2016_ner_source",
            version=SOURCE_VERSION,
            description="vlsp2016_ner source schema",
            schema="source",
            subset_id="vlsp2016_ner",
        ),
        SEACrowdConfig(
            name="vlsp2016_ner_seacrowd_seq_label",
            version=SEACROWD_VERSION,
            description="vlsp2016_ner SEACrowd schema",
            schema="seacrowd_seq_label",
            subset_id="vlsp2016_ner",
        ),
    ]

    DEFAULT_CONFIG_NAME = "vlsp2016_ner_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.Value("int64")),
                }
            )
        elif self.config.schema == "seacrowd_seq_label":
            features = schemas.seq_label.features([x for x in range(9)])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        train_url = _URLS[_DATASETNAME]["train"]
        train_path = dl_manager.download_and_extract(train_url)

        test_url = _URLS[_DATASETNAME]["test"]
        test_path = dl_manager.download_and_extract(test_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_path,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        df = pd.read_parquet(filepath)
        if self.config.schema == "source":
            for i in range(len(df)):
                row = df.iloc[i]
                yield (
                    i,
                    {
                        "tokens": row["tokens"],
                        "ner_tags": row["ner_tags"],
                    },
                )
        elif self.config.schema == "seacrowd_seq_label":
            for i in range(len(df)):
                row = df.iloc[i]
                yield (
                    i,
                    {
                        "id": i,
                        "tokens": row["tokens"],
                        "labels": row["ner_tags"],
                    },
                )
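
For quick verification, a minimal loading sketch (not taken from this commit): it assumes the script above is saved locally as vlsp2016_ner.py, that the seacrowd package it imports is installed, and a datasets release that still supports script-based datasets and the trust_remote_code argument. Config names and splits are the ones registered in BUILDER_CONFIGS and _split_generators.

    import datasets

    # Source schema: "tokens" (strings) and "ner_tags" (integer ids).
    source = datasets.load_dataset("vlsp2016_ner.py", name="vlsp2016_ner_source", trust_remote_code=True)

    # SEACrowd sequence-labelling schema: "id", "tokens", "labels".
    seq_label = datasets.load_dataset("vlsp2016_ner.py", name="vlsp2016_ner_seacrowd_seq_label", trust_remote_code=True)

    print(source["train"][0]["tokens"][:10])
    print(seq_label["test"][0]["labels"][:10])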