Datasets:
EMBO
/

Languages:
English
ArXiv:
DOI:
License:
drAbreu committed on
Commit
59171e9
1 Parent(s): e7048f4

First commit SourceData.py

Browse files
Files changed (1) hide show
  1. SourceData.py +273 -0
SourceData.py CHANGED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+
17
+ # template from : https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import json
22
+ import datasets
23
+
# Root URL of the dataset repository on the Hugging Face Hub; split files are
# resolved relative to this prefix.
_BASE_URL = "https://huggingface.co/datasets/EMBO/SourceData/resolve/main/"
26
class SourceData(datasets.GeneratorBasedBuilder):
    """SourceDataNLP provides datasets to train NLP tasks in cell and molecular biology."""

    # IOB2 label set for the named-entity-recognition task.
    _NER_LABEL_NAMES = [
        "O",
        "B-SMALL_MOLECULE",
        "I-SMALL_MOLECULE",
        "B-GENEPROD",
        "I-GENEPROD",
        "B-SUBCELLULAR",
        "I-SUBCELLULAR",
        "B-CELL_TYPE",
        "I-CELL_TYPE",
        "B-TISSUE",
        "I-TISSUE",
        "B-ORGANISM",
        "I-ORGANISM",
        "B-EXP_ASSAY",
        "I-EXP_ASSAY",
        "B-DISEASE",
        "I-DISEASE",
        "B-CELL_LINE",
        "I-CELL_LINE",
    ]
    # IOB2 label set for the semantic-role tasks (controlled vs. measured variables).
    _SEMANTIC_ROLES = ["O", "B-CONTROLLED_VAR", "I-CONTROLLED_VAR", "B-MEASURED_VAR", "I-MEASURED_VAR"]
    # Label set marking tokens that start a new figure panel within a caption.
    _PANEL_START_NAMES = ["O", "B-PANEL_START", "I-PANEL_START"]
    # Entity categories attached to each token in the combined ROLES_MULTI task.
    _ROLES_MULTI = ["O", "GENEPROD", "SMALL_MOLECULE"]

    _CITATION = """\
@Unpublished{
huggingface: dataset,
title = {SourceData NLP},
authors={Thomas Lemberger & Jorge Abreu-Vicente, EMBO},
year={2023}
}
"""

    _DESCRIPTION = """\
This dataset is based on the SourceData database and is intended to facilitate training of NLP tasks in the cell and molecular biology domain.
"""

    _HOMEPAGE = "https://huggingface.co/datasets/EMBO/SourceData"

    _LICENSE = "CC-BY 4.0"

    # BUG FIX: the original referenced ``self.config.version`` at class-body
    # scope (where no ``self`` exists), raising NameError as soon as the module
    # was imported.  A fixed default version is declared here instead, and the
    # per-task URLs are built lazily in ``_split_generators`` from the active
    # config's version.
    # NOTE(review): "1.0.0" assumed as the intended default — TODO confirm.
    _DEFAULT_VERSION = "1.0.0"
    VERSION = datasets.Version(_DEFAULT_VERSION)

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="NER", version=VERSION, description="Dataset for named-entity recognition."),
        datasets.BuilderConfig(name="PANELIZATION", version=VERSION, description="Dataset to separate figure captions into panels."),
        datasets.BuilderConfig(name="ROLES_GP", version=VERSION, description="Dataset for semantic roles of gene products."),
        datasets.BuilderConfig(name="ROLES_SM", version=VERSION, description="Dataset for semantic roles of small molecules."),
        datasets.BuilderConfig(name="ROLES_MULTI", version=VERSION, description="Dataset to train roles. ROLES_GP and ROLES_SM at once."),
    ]
    DEFAULT_CONFIG_NAME = "NER"

    def _get_urls(self):
        """Return the per-task data URLs for the active config's version."""
        base = f"{_BASE_URL}token_classification_v{self.config.version}/"
        return {
            "NER": base + "ner/",
            "PANELIZATION": base + "panelization/",
            "ROLES_GP": base + "roles_gene/",
            "ROLES_SM": base + "roles_small_mol/",
            "ROLES_MULTI": base + "roles_multi/",
        }

    def _info(self):
        """Declare the feature schema of each configuration.

        Feature names are aligned with the keys yielded by
        ``_generate_examples`` (the original declared ``is_category`` while
        the generator yields ``tag_mask``, and PANELIZATION omitted the
        ``tag_mask``/``text`` keys the generator produces — every example
        would have failed feature validation).
        """
        words = datasets.Sequence(feature=datasets.Value("string"))
        tag_mask = datasets.Sequence(feature=datasets.Value("int8"))
        text = datasets.Value("string")

        if self.config.name == "NER":
            features = datasets.Features(
                {
                    "words": words,
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._NER_LABEL_NAMES),
                            names=self._NER_LABEL_NAMES,
                        )
                    ),
                    "tag_mask": tag_mask,
                    "text": text,
                }
            )
        elif self.config.name in ("ROLES_GP", "ROLES_SM"):
            # Both single-entity role tasks share the same schema.
            features = datasets.Features(
                {
                    "words": words,
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._SEMANTIC_ROLES),
                            names=self._SEMANTIC_ROLES,
                        )
                    ),
                    "tag_mask": tag_mask,
                    "text": text,
                }
            )
        elif self.config.name == "ROLES_MULTI":
            # NOTE: the original had an unclosed ``datasets.Sequence(`` here
            # (syntax error); the "category" feature is now properly closed.
            features = datasets.Features(
                {
                    "words": words,
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._SEMANTIC_ROLES),
                            names=self._SEMANTIC_ROLES,
                        )
                    ),
                    "tag_mask": tag_mask,
                    "category": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._ROLES_MULTI),
                            names=self._ROLES_MULTI,
                        )
                    ),
                    "text": text,
                }
            )
        elif self.config.name == "PANELIZATION":
            features = datasets.Features(
                {
                    "words": words,
                    "labels": datasets.Sequence(
                        feature=datasets.ClassLabel(
                            num_classes=len(self._PANEL_START_NAMES),
                            names=self._PANEL_START_NAMES,
                        )
                    ),
                    "tag_mask": tag_mask,
                    "text": text,
                }
            )
        else:
            # Original left ``features`` unbound here, raising a confusing
            # UnboundLocalError instead of a clear error.
            raise ValueError(f"unknown config name: {self.config.name}")

        return datasets.DatasetInfo(
            description=self._DESCRIPTION,
            features=features,
            # Original pointed at "label_ids", which is not a declared feature.
            supervised_keys=("words", "labels"),
            homepage=self._HOMEPAGE,
            license=self._LICENSE,
            citation=self._CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.

        Downloads the split files for the active config from their official
        URL and points each split at its JSONL file.
        """
        try:
            url = self._get_urls()[self.config.name]
        except KeyError:
            raise ValueError(f"unknown config name: {self.config.name}")
        # Original appended "/" and then joined with "/train.jsonl", producing
        # a double slash; join once here instead.
        data_dir = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={"filepath": data_dir + "/train.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir + "/test.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": data_dir + "/eval.jsonl"},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) tuples read from ``filepath``.

        Each line of the file is one JSON-encoded example.  The key is not
        important, it's more here for legacy reasons (legacy from tfds).
        """
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if self.config.name in ("NER", "ROLES_GP", "ROLES_SM"):
                    # These three tasks share an identical output layout.
                    yield id_, {
                        "words": data["words"],
                        "labels": data["labels"],
                        "tag_mask": data["is_category"],
                        "text": data["text"],
                    }
                elif self.config.name == "ROLES_MULTI":
                    labels = data["labels"]
                    # Mask every token carrying a non-"O" (non-zero) role label.
                    tag_mask = [1 if t != 0 else 0 for t in labels]
                    yield id_, {
                        "words": data["words"],
                        "labels": labels,
                        "tag_mask": tag_mask,
                        "category": data["is_category"],
                        "text": data["text"],
                    }
                elif self.config.name == "PANELIZATION":
                    labels = data["labels"]
                    # Mask the first token of each panel.
                    tag_mask = [1 if t == "B-PANEL_START" else 0 for t in labels]
                    yield id_, {
                        "words": data["words"],
                        "labels": labels,
                        "tag_mask": tag_mask,
                        "text": data["text"],
                    }