gabrielaltay committed
Commit 837c7b1 · 1 Parent(s): 386ddf4

upload hub_repos/drugprot/drugprot.py to hub from bigbio repo

Files changed (1)
  1. drugprot.py +260 -0
drugprot.py ADDED
@@ -0,0 +1,260 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The DrugProt corpus consists of (a) expert-labelled chemical and gene mentions, and (b) all binary relationships
between them corresponding to a specific set of biologically relevant relation types. The corpus was introduced
in the context of BioCreative VII Track 1 (Text mining drug and chemical-protein interactions).

For further information see:
https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-1/
"""
import collections
from pathlib import Path
from typing import Dict, Iterator, Tuple

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
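# The three imports above come from bigbiohub.py, the helper module uploaded alongside
# this script in the Hub repo; it provides the shared BigBio KB feature schema
# (kb_features), the BigBioConfig class, and the Tasks enum.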

_LANGUAGES = ["English"]
_PUBMED = True
_LOCAL = False
_CITATION = """\
@inproceedings{miranda2021overview,
  title={Overview of DrugProt BioCreative VII track: quality evaluation and large scale text mining of \
drug-gene/protein relations},
  author={Miranda, Antonio and Mehryary, Farrokh and Luoma, Jouni and Pyysalo, Sampo and Valencia, Alfonso \
and Krallinger, Martin},
  booktitle={Proceedings of the seventh BioCreative challenge evaluation workshop},
  year={2021}
}
"""

_DATASETNAME = "drugprot"
_DISPLAYNAME = "DrugProt"


_DESCRIPTION = """\
The DrugProt corpus consists of (a) expert-labelled chemical and gene mentions, and (b) all binary relationships \
between them corresponding to a specific set of biologically relevant relation types.
"""

_HOMEPAGE = "https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vii/track-1/"

_LICENSE = "Creative Commons Attribution 4.0 International"

_URLS = {_DATASETNAME: "https://zenodo.org/record/5119892/files/drugprot-training-development-test-background.zip?download=1"}
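# The Zenodo archive bundles training, development, and large-scale test-background
# documents; only the gold-annotated ("gs") training and development splits are
# exposed by this loader.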

_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.RELATION_EXTRACTION]

_SOURCE_VERSION = "1.0.2"
_BIGBIO_VERSION = "1.0.0"


class DrugProtDataset(datasets.GeneratorBasedBuilder):
    """
    The DrugProt corpus consists of (a) expert-labelled chemical and gene mentions, and
    (b) all binary relationships between them.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

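    # Two configurations are exposed: "source" mirrors the layout of the original
    # DrugProt TSV release, while "bigbio_kb" maps the same records into the shared
    # BigBio knowledge-base schema.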
    BUILDER_CONFIGS = [
        BigBioConfig(
            name="drugprot_source",
            version=SOURCE_VERSION,
            description="DrugProt source schema",
            schema="source",
            subset_id="drugprot",
        ),
        BigBioConfig(
            name="drugprot_bigbio_kb",
            version=BIGBIO_VERSION,
            description="DrugProt BigBio schema",
            schema="bigbio_kb",
            subset_id="drugprot",
        ),
    ]

    DEFAULT_CONFIG_NAME = "drugprot_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "abstract": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "entities": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "text": datasets.Value("string"),
                            "offset": datasets.Sequence(datasets.Value("int32")),
                        }
                    ],
                    "relations": [
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arg1_id": datasets.Value("string"),
                            "arg2_id": datasets.Value("string"),
                        }
                    ],
                }
            )

        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[_DATASETNAME]
        data_dir = Path(dl_manager.download_and_extract(urls))
        data_dir = data_dir / "drugprot-gs-training-development"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "split": "training"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "split": "development"},
            ),
        ]

    def _generate_examples(self, data_dir: Path, split: str) -> Iterator[Tuple[str, Dict]]:
        if self.config.name == "drugprot_source":
            documents = self._read_source_examples(data_dir, split)
            for document_id, document in documents.items():
                yield document_id, document

        elif self.config.name == "drugprot_bigbio_kb":
            documents = self._read_source_examples(data_dir, split)
            for document_id, document in documents.items():
                yield document_id, self._transform_source_to_kb(document)

    def _read_source_examples(self, input_dir: Path, split: str) -> Dict:
        """Read the abstracts, entities, and relations TSV files of one split into source-schema documents."""
        split_dir = input_dir / split
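        # Note: "abstracs" (sic) matches the file name shipped inside the upstream Zenodo archive.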
        abstracts_file = split_dir / f"drugprot_{split}_abstracs.tsv"
        entities_file = split_dir / f"drugprot_{split}_entities.tsv"
        relations_file = split_dir / f"drugprot_{split}_relations.tsv"

        document_to_entities = collections.defaultdict(list)
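        # Entities TSV columns: document id, entity id (e.g. T1), type, start offset, end offset, mention text.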
        for line in entities_file.read_text().splitlines():
            columns = line.split("\t")
            document_id = columns[0]

            document_to_entities[document_id].append(
                {
                    "id": document_id + "_" + columns[1],
                    "type": columns[2],
                    "offset": [columns[3], columns[4]],
                    "text": columns[5],
                }
            )

        document_to_relations = collections.defaultdict(list)
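        # Relations TSV columns: document id, relation type, "Arg1:Tn", "Arg2:Tn";
        # the [5:] slices below strip the "Arg1:"/"Arg2:" prefixes from the entity references.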
        for line in relations_file.read_text().splitlines():
            columns = line.split("\t")
            document_id = columns[0]

            document_relations = document_to_relations[document_id]

            document_relations.append(
                {
                    "id": document_id + "_" + str(len(document_relations)),
                    "type": columns[1],
                    "arg1_id": document_id + "_" + columns[2][5:],
                    "arg2_id": document_id + "_" + columns[3][5:],
                }
            )

        document_to_source = {}
        for line in abstracts_file.read_text().splitlines():
            document_id, title, abstract = line.split("\t")

            document_to_source[document_id] = {
                "document_id": document_id,
                "title": title,
                "abstract": abstract,
                "text": " ".join([title, abstract]),
                "entities": document_to_entities[document_id],
                "relations": document_to_relations[document_id],
            }

        return document_to_source

    def _transform_source_to_kb(self, source_document: Dict) -> Dict:
        document_id = source_document["document_id"]

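        # The DrugProt release computes entity offsets over "title + separator + abstract"
        # with a one-character separator, so building passages over title and abstract
        # joined by a single space (offset += len(text) + 1 below) keeps the shipped
        # entity offsets aligned with the passage text.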
        offset = 0
        passages = []
        for text_field in ["title", "abstract"]:
            text = source_document[text_field]
            passages.append(
                {
                    "id": document_id + "_" + text_field,
                    "type": text_field,
                    "text": [text],
                    "offsets": [[offset, offset + len(text)]],
                }
            )
            offset += len(text) + 1

        entities = [
            {
                "id": entity["id"],
                "type": entity["type"],
                "text": [entity["text"]],
                "offsets": [entity["offset"]],
                "normalized": [],
            }
            for entity in source_document["entities"]
        ]

        relations = [
            {
                "id": relation["id"],
                "type": relation["type"],
                "arg1_id": relation["arg1_id"],
                "arg2_id": relation["arg2_id"],
                "normalized": [],
            }
            for relation in source_document["relations"]
        ]

        return {
            "id": document_id,
            "document_id": document_id,
            "passages": passages,
            "entities": entities,
            "relations": relations,
            "events": [],
            "coreferences": [],
        }
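
A minimal usage sketch for the loader above (hedged: it assumes this script is published
on the Hub as "bigbio/drugprot"; recent datasets releases may additionally require
trust_remote_code=True when loading script-backed datasets):

    from datasets import load_dataset

    # Source schema: one record per PubMed abstract, with entity and relation lists.
    source = load_dataset("bigbio/drugprot", name="drugprot_source", split="train")

    # BigBio KB schema: title/abstract passages plus entities and relations.
    kb = load_dataset("bigbio/drugprot", name="drugprot_bigbio_kb", split="validation")

    example = kb[0]
    print(example["document_id"], len(example["entities"]), len(example["relations"]))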