Modalities: Text
Languages: English
Size: < 1K
Libraries: Datasets
License: unknown
Committed by gabrielaltay
Commit: 9bae2e6
Parent: ecf5b98

upload hubscripts/scai_chemical_hub.py to hub from bigbio repo

Files changed (1):
  1. scai_chemical.py +257 -0
scai_chemical.py ADDED
@@ -0,0 +1,257 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A dataset loader for the SCAI Chemical dataset.
+
+ SCAI Chemical is a corpus of MEDLINE abstracts that has been annotated
+ to give an overview of the different chemical name classes
+ found in MEDLINE text.
+ """
+
+ import gzip
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import kb_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = False
+ _CITATION = """\
+ @inproceedings{kolarik:lrec-ws08,
+   author = {Kol{\'a}{\v r}ik, Corinna and Klinger, Roman and Friedrich, Christoph M and Hofmann-Apitius, Martin and Fluck, Juliane},
+   title = {Chemical Names: {T}erminological Resources and Corpora Annotation},
+   booktitle = {LREC Workshop on Building and Evaluating Resources for Biomedical Text Mining},
+   year = {2008},
+ }
+ """
+
+ _DATASETNAME = "scai_chemical"
+ _DISPLAYNAME = "SCAI Chemical"
+
+ _DESCRIPTION = """\
+ SCAI Chemical is a corpus of MEDLINE abstracts that has been annotated
+ to give an overview of the different chemical name classes
+ found in MEDLINE text.
+ """
+
+ _HOMEPAGE = "https://www.scai.fraunhofer.de/en/business-research-areas/bioinformatics/downloads/corpora-for-chemical-entity-recognition.html"
+
+ _LICENSE = 'License information unavailable'
+
+ _URLS = {
+     _DATASETNAME: "https://www.scai.fraunhofer.de/content/dam/scai/de/downloads/bioinformatik/Corpora-for-Chemical-Entity-Recognition/chemicals-test-corpus-27-04-2009-v3_iob.gz",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
+
+ _SOURCE_VERSION = "3.0.0"
+
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ class ScaiChemicalDataset(datasets.GeneratorBasedBuilder):
+     """SCAI Chemical is a dataset annotated in 2008 with mentions of chemicals."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="scai_chemical_source",
+             version=SOURCE_VERSION,
+             description="SCAI Chemical source schema",
+             schema="source",
+             subset_id="scai_chemical",
+         ),
+         BigBioConfig(
+             name="scai_chemical_bigbio_kb",
+             version=BIGBIO_VERSION,
+             description="SCAI Chemical BigBio schema",
+             schema="bigbio_kb",
+             subset_id="scai_chemical",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "scai_chemical_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "tokens": [
+                         {
+                             "offsets": [datasets.Value("int64")],
+                             "text": datasets.Value("string"),
+                             "tag": datasets.Value("string"),
+                         }
+                     ],
+                     "entities": [
+                         {
+                             "offsets": [datasets.Value("int64")],
+                             "text": datasets.Value("string"),
+                             "type": datasets.Value("string"),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_kb":
+             features = kb_features
+         else:
+             raise ValueError("Unrecognized schema: %s" % self.config.schema)
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         url = _URLS[_DATASETNAME]
+         filepath = dl_manager.download(url)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": filepath,
+                     "split": "train",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         # Iterates through lines in file, collecting all lines belonging
+         # to an example and converting into a single dict
+         examples = []
+         tokens = None
+         with gzip.open(filepath, "rt", encoding="mac_roman") as data_file:
+             for line in data_file:
+                 line = line.strip()
+                 if line.startswith("###"):
+                     tokens = [line]
+                 elif line == "":
+                     examples.append(self._make_example(tokens))
+                 else:
+                     tokens.append(line)
+
+         # Returns the examples using the desired schema
+         if self.config.schema == "source":
+             for i, example in enumerate(examples):
+                 yield i, example
+
+         elif self.config.schema == "bigbio_kb":
+             for i, example in enumerate(examples):
+                 bigbio_example = {
+                     "id": "example-" + str(i),
+                     "document_id": example["document_id"],
+                     "passages": [
+                         {
+                             "id": "passage-" + str(i),
+                             "type": "abstract",
+                             "text": [example["text"]],
+                             "offsets": [[0, len(example["text"])]],
+                         }
+                     ],
+                     "entities": [],
+                     "events": [],
+                     "coreferences": [],
+                     "relations": [],
+                 }
+
+                 # Converts entities to BigBio format
+                 for j, entity in enumerate(example["entities"]):
+                     bigbio_example["entities"].append(
+                         {
+                             "id": "entity-" + str(i) + "-" + str(j),
+                             "offsets": [entity["offsets"]],
+                             "text": [entity["text"]],
+                             "type": entity["type"],
+                             "normalized": [],
+                         }
+                     )
+
+                 yield i, bigbio_example
+
+     @staticmethod
+     def _make_example(tokens):
+         """
+         Converts a list of lines representing tokens into an example dictionary
+         formatted according to the source schema
+
+         :param tokens: list of strings
+         :return: dictionary in the source schema
+         """
+         document_id = tokens[0][4:]
+
+         text = ""
+         processed_tokens = []
+         entities = []
+         last_offset = 0
+
+         for token in tokens[1:]:
+             token_pieces = token.split("\t")
+             if len(token_pieces) != 5:
+                 raise ValueError("Failed to parse line: %s" % token)
+
+             token_text = str(token_pieces[0])
+             token_start = int(token_pieces[1])
+             token_end = int(token_pieces[2])
+             entity_text = str(token_pieces[3])
+             token_tag = str(token_pieces[4])[1:]
+
+             if token_start > last_offset:
+                 for _ in range(token_start - last_offset):
+                     text += " "
+             elif token_start < last_offset:
+                 raise ValueError("Invalid start index: %s" % token)
+             last_offset = token_end
+
+             text += token_text
+             processed_tokens.append(
+                 {
+                     "offsets": [token_start, token_end],
+                     "text": token_text,
+                     "tag": token_tag,
+                 }
+             )
+             if entity_text != "":
+                 entities.append(
+                     {
+                         "offsets": [token_start, token_start + len(entity_text)],
+                         "text": entity_text,
+                         "type": token_tag[2:],
+                     }
+                 )
+
+         return {
+             "document_id": document_id,
+             "text": text,
+             "entities": entities,
+             "tokens": processed_tokens,
+         }
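
For reference, a minimal usage sketch (not part of the committed script). It assumes the loader is served from this dataset repository under the bigbio organization (bigbio/scai_chemical); the config names come from BUILDER_CONFIGS above, and recent releases of the datasets library may additionally require trust_remote_code=True to run a loader script.

    import datasets

    # Source schema: document_id, text, tokens, entities (as defined in _info above).
    source = datasets.load_dataset(
        "bigbio/scai_chemical",        # assumed Hub repo id for this loader script
        name="scai_chemical_source",
        trust_remote_code=True,        # may be required by recent `datasets` versions
    )

    # Harmonized BigBio knowledge-base schema: passages, entities, relations, ...
    kb = datasets.load_dataset(
        "bigbio/scai_chemical",
        name="scai_chemical_bigbio_kb",
        trust_remote_code=True,
    )

    # The loader defines a single "train" split; inspect the first abstract's entities.
    example = kb["train"][0]
    print(example["document_id"])
    for entity in example["entities"]:
        print(entity["type"], entity["text"], entity["offsets"])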