Modalities: Text
Languages: English
Size: < 1K
Libraries: Datasets
License: unknown
gabrielaltay committed · Commit c4b832b · 1 Parent(s): 76387fe

upload hubscripts/iepa_hub.py to hub from bigbio repo

Files changed (1):
  1. iepa.py +287 -0
iepa.py ADDED
@@ -0,0 +1,287 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The IEPA benchmark PPI corpus is designed for relation extraction. It was
+created from 303 PubMed abstracts, each of which contains a specific pair of
+co-occurring chemicals.
+"""
+
+# Comment from Author
+# The BigBio schema rebases entity offsets so that 0 is the start of the document.
+# (In the source schema, entity offsets start from 0 for each passage in the document.)
+# Entity offsets in the source schema remain unchanged.
+
+import xml.dom.minidom as xml
+from typing import Dict, List, Tuple
+
+import datasets
+
+from .bigbiohub import kb_features
+from .bigbiohub import BigBioConfig
+from .bigbiohub import Tasks
+
+_LANGUAGES = ['English']
+_PUBMED = True
+_LOCAL = False
+_CITATION = """\
+@ARTICLE{ding2001mining,
+  title    = "Mining {MEDLINE}: abstracts, sentences, or phrases?",
+  author   = "Ding, J and Berleant, D and Nettleton, D and Wurtele, E",
+  journal  = "Pac Symp Biocomput",
+  pages    = "326--337",
+  year     =  2002,
+  address  = "United States",
+  language = "en"
+}
+"""
+
+_DATASETNAME = "iepa"
+_DISPLAYNAME = "IEPA"
+
+_DESCRIPTION = """\
+The IEPA benchmark PPI corpus is designed for relation extraction. It was \
+created from 303 PubMed abstracts, each of which contains a specific pair of \
+co-occurring chemicals.
+"""
+
+_HOMEPAGE = "http://psb.stanford.edu/psb-online/proceedings/psb02/abstracts/p326.html"
+
+_LICENSE = 'License information unavailable'
+
+_URLS = {
+    _DATASETNAME: {
+        "train": "https://raw.githubusercontent.com/metalrt/ppi-dataset/master/csv_output/IEPA-train.xml",
+        "test": "https://raw.githubusercontent.com/metalrt/ppi-dataset/master/csv_output/IEPA-test.xml",
+    },
+}
+
+_SUPPORTED_TASKS = [Tasks.RELATION_EXTRACTION]
+
+_SOURCE_VERSION = "1.0.0"
+
+_BIGBIO_VERSION = "1.0.0"
+
+
+class IepaDataset(datasets.GeneratorBasedBuilder):
+    """The IEPA benchmark PPI corpus is designed for relation extraction."""
+
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+    BUILDER_CONFIGS = [
+        BigBioConfig(
+            name="iepa_source",
+            version=SOURCE_VERSION,
+            description="IEPA source schema",
+            schema="source",
+            subset_id="iepa",
+        ),
+        BigBioConfig(
+            name="iepa_bigbio_kb",
+            version=BIGBIO_VERSION,
+            description="IEPA BigBio schema",
+            schema="bigbio_kb",
+            subset_id="iepa",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "iepa_source"
+
+    def _info(self) -> datasets.DatasetInfo:
+
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "PMID": datasets.Value("string"),
+                    "origID": datasets.Value("string"),
+                    "sentences": [
+                        {
+                            "id": datasets.Value("string"),
+                            "origID": datasets.Value("string"),
+                            "offsets": [datasets.Value("int32")],
+                            "text": datasets.Value("string"),
+                            "entities": [
+                                {
+                                    "id": datasets.Value("string"),
+                                    "origID": datasets.Value("string"),
+                                    "text": datasets.Value("string"),
+                                    "offsets": [datasets.Value("int32")],
+                                }
+                            ],
+                            "interactions": [
+                                {
+                                    "id": datasets.Value("string"),
+                                    "e1": datasets.Value("string"),
+                                    "e2": datasets.Value("string"),
+                                    "type": datasets.Value("string"),
+                                }
+                            ],
+                        }
+                    ],
+                }
+            )
+
+        elif self.config.schema == "bigbio_kb":
+            features = kb_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=str(_LICENSE),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+
+        urls = _URLS[_DATASETNAME]
+        data_dir = dl_manager.download_and_extract(urls)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": data_dir["train"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": data_dir["test"],
+                },
+            ),
+        ]
+
+    def _generate_examples(self, filepath) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+
+        collection = xml.parse(filepath).documentElement
+
+        if self.config.schema == "source":
+            for id, document in self._parse_documents(collection):
+                yield id, document
+
+        elif self.config.schema == "bigbio_kb":
+            for id, document in self._parse_documents(collection):
+                yield id, self._source_to_bigbio(document)
+
+    def _parse_documents(self, collection):
+        for document in collection.getElementsByTagName("document"):
+            pmid_doc = self._strict_get_attribute(document, "PMID")
+            id_doc = self._strict_get_attribute(document, "id")
+            origID_doc = self._strict_get_attribute(document, "origID")
+            sentences = []
+            for sentence in document.getElementsByTagName("sentence"):
+                offsets_sent = self._strict_get_attribute(sentence, "charOffset").split(
+                    "-"
+                )
+                id_sent = self._strict_get_attribute(sentence, "id")
+                origID_sent = self._strict_get_attribute(sentence, "origID")
+                text_sent = self._strict_get_attribute(sentence, "text")
+
+                entities = []
+                for entity in sentence.getElementsByTagName("entity"):
+                    id_ent = self._strict_get_attribute(entity, "id")
+                    origID_ent = self._strict_get_attribute(entity, "origID")
+                    text_ent = self._strict_get_attribute(entity, "text")
+                    offsets_ent = self._strict_get_attribute(
+                        entity, "charOffset"
+                    ).split("-")
+                    entities.append(
+                        {
+                            "id": id_ent,
+                            "origID": origID_ent,
+                            "text": text_ent,
+                            "offsets": offsets_ent,
+                        }
+                    )
+
+                interactions = []
+                for interaction in sentence.getElementsByTagName("interaction"):
+                    id_int = self._strict_get_attribute(interaction, "id")
+                    e1_int = self._strict_get_attribute(interaction, "e1")
+                    e2_int = self._strict_get_attribute(interaction, "e2")
+                    type_int = self._strict_get_attribute(interaction, "type")
+                    interactions.append(
+                        {"id": id_int, "e1": e1_int, "e2": e2_int, "type": type_int}
+                    )
+
+                sentences.append(
+                    {
+                        "id": id_sent,
+                        "origID": origID_sent,
+                        "offsets": offsets_sent,
+                        "text": text_sent,
+                        "entities": entities,
+                        "interactions": interactions,
+                    }
+                )
+            yield id_doc, {
+                "id": id_doc,
+                "PMID": pmid_doc,
+                "origID": origID_doc,
+                "sentences": sentences,
+            }
+
+    def _strict_get_attribute(self, element, key):
+        if element.hasAttribute(key):
+            return element.getAttribute(key)
+        else:
+            raise ValueError(f"No such key exists in element: {element.tagName} {key}")
+
+    def _source_to_bigbio(self, document_):
+        document = {}
+        document["id"] = document_["id"]
+        document["document_id"] = document_["PMID"]
+
+        passages = []
+        entities = []
+        relations = []
+        for sentence_ in document_["sentences"]:
+            for entity_ in sentence_["entities"]:
+                entity_["type"] = ""
+                entity_["normalized"] = []
+                entity_.pop("origID")
+                entity_["text"] = [entity_["text"]]
+                entity_["offsets"] = [
+                    [
+                        int(sentence_["offsets"][0]) + int(entity_["offsets"][0]),
+                        int(sentence_["offsets"][0]) + int(entity_["offsets"][1]),
+                    ]
+                ]
+                entities.append(entity_)
+            for relation_ in sentence_["interactions"]:
+                relation_["arg1_id"] = relation_.pop("e1")
+                relation_["arg2_id"] = relation_.pop("e2")
+                relation_["normalized"] = []
+                relations.append(relation_)
+
+            sentence_.pop("entities")
+            sentence_.pop("interactions")
+            sentence_.pop("origID")
+            sentence_["type"] = ""
+            sentence_["text"] = [sentence_["text"]]
+            sentence_["offsets"] = [sentence_["offsets"]]
+            passages.append(sentence_)
+
+        document["passages"] = passages
+        document["entities"] = entities
+        document["relations"] = relations
+        document["events"] = []
+        document["coreferences"] = []
+        return document
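
Once this script is on the Hub, it can be loaded through the `datasets` library. A minimal sketch, assuming the Hub repo id `bigbio/iepa` (the repo this commit targets) and a recent `datasets` version where script-based datasets require `trust_remote_code`:

from datasets import load_dataset

# Source schema: entity offsets are relative to the start of each sentence.
source = load_dataset("bigbio/iepa", name="iepa_source", trust_remote_code=True)

# BigBio KB schema: entity offsets are rebased to document level, i.e. the
# sentence start offset is added to each entity offset (see _source_to_bigbio).
kb = load_dataset("bigbio/iepa", name="iepa_bigbio_kb", trust_remote_code=True)

print(source["train"][0]["sentences"][0]["entities"][0]["offsets"])  # per-sentence
print(kb["train"][0]["entities"][0]["offsets"])                      # per-document

Both configs expose the train and test splits defined in _split_generators.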