Modalities: Text
Languages: English
Size: < 1K
Libraries: Datasets
gabrielaltay committed · Commit 6bb60df · 1 Parent(s): d61a6f7

upload hubscripts/tmvar_v2_hub.py to hub from bigbio repo

Files changed (1)
  1. tmvar_v2.py +291 -0
tmvar_v2.py ADDED
@@ -0,0 +1,291 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import itertools
+import os
+from typing import Dict, Iterator, List, Tuple
+
+import datasets
+
+from .bigbiohub import kb_features
+from .bigbiohub import BigBioConfig
+from .bigbiohub import Tasks
+
+_LANGUAGES = ['English']
+_PUBMED = True
+_LOCAL = False
+_CITATION = """\
+@article{wei2018tmvar,
+title={tmVar 2.0: integrating genomic variant information from literature with dbSNP and ClinVar for precision medicine},
+author={Wei, Chih-Hsuan and Phan, Lon and Feltz, Juliana and Maiti, Rama and Hefferon, Tim and Lu, Zhiyong},
+journal={Bioinformatics},
+volume={34},
+number={1},
+pages={80--87},
+year={2018},
+publisher={Oxford University Press}
+}
+"""
+
+_DATASETNAME = "tmvar_v2"
+_DISPLAYNAME = "tmVar v2"
+
+_DESCRIPTION = """This dataset contains 158 PubMed articles manually annotated with mutation mentions of various kinds and dbsnp normalizations for each of them.
+It can be used for NER and NED tasks. This dataset has a single split."""
+
+_HOMEPAGE = "https://www.ncbi.nlm.nih.gov/research/bionlp/Tools/tmvar/"
+
+_LICENSE = 'License information unavailable'
+
+_URLS = {
+    _DATASETNAME: "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmTools/download/tmVar/tmVar.Normalization.txt",
+}
+
+_SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION, Tasks.NAMED_ENTITY_DISAMBIGUATION]
+
+_SOURCE_VERSION = "2.0.0"
+
+_BIGBIO_VERSION = "1.0.0"
+
+logger = datasets.utils.logging.get_logger(__name__)
+
+
+class TmvarV2Dataset(datasets.GeneratorBasedBuilder):
+    """
+    This dataset contains 158 PubMed articles manually annotated with mutation mentions of various kinds and dbsnp normalizations for each of them.
+    """
+
+    DEFAULT_CONFIG_NAME = "tmvar_v2_source"
+    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+    BUILDER_CONFIGS = []
+    BUILDER_CONFIGS.append(
+        BigBioConfig(
+            name=f"{_DATASETNAME}_source",
+            version=SOURCE_VERSION,
+            description=f"{_DATASETNAME} source schema",
+            schema="source",
+            subset_id=f"{_DATASETNAME}",
+        )
+    )
+    BUILDER_CONFIGS.append(
+        BigBioConfig(
+            name=f"{_DATASETNAME}_bigbio_kb",
+            version=BIGBIO_VERSION,
+            description=f"{_DATASETNAME} BigBio schema",
+            schema="bigbio_kb",
+            subset_id=f"{_DATASETNAME}",
+        )
+    )
+
+    def _info(self) -> datasets.DatasetInfo:
+
+        if self.config.schema == "source":
+            features = datasets.Features(
+                {
+                    "pmid": datasets.Value("string"),
+                    "passages": [
+                        {
+                            "type": datasets.Value("string"),
+                            "text": datasets.Value("string"),
+                            "offsets": [datasets.Value("int32")],
+                        }
+                    ],
+                    "entities": [
+                        {
+                            "text": datasets.Value("string"),
+                            "offsets": [datasets.Value("int32")],
+                            "concept_id": datasets.Value("string"),
+                            "semantic_type_id": datasets.Value("string"),
+                            "rsid": datasets.Value("string"),
+                        }
+                    ],
+                }
+            )
+        elif self.config.schema == "bigbio_kb":
+            features = kb_features
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=str(_LICENSE),
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
+        """Returns SplitGenerators."""
+
+        url = _URLS[_DATASETNAME]
+        train_filepath = dl_manager.download(url)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": train_filepath,
+                },
+            )
+        ]
+
+    def _generate_examples(self, filepath) -> Tuple[int, Dict]:
+        """Yields examples as (key, example) tuples."""
+        if self.config.schema == "source":
+            with open(filepath, "r", encoding="utf8") as fstream:
+                for raw_document in self.generate_raw_docs(fstream):
+                    document = self.parse_raw_doc(raw_document)
+                    yield document["pmid"], document
+
+        elif self.config.schema == "bigbio_kb":
+            with open(filepath, "r", encoding="utf8") as fstream:
+                uid = itertools.count(0)
+                for raw_document in self.generate_raw_docs(fstream):
+                    document = self.parse_raw_doc(raw_document)
+                    document["id"] = next(uid)
+                    document["document_id"] = document.pop("pmid")
+
+                    entities_ = []
+                    for entity in document["entities"]:
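+                        # rsid values in the source file look like "RSID:2075789";
+                        # the numeric part after the colon becomes the dbSNP identifier below.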
+                        if entity.get("rsid", ""):
+                            normalized = [
+                                {
+                                    "db_name": "dbsnp",
+                                    "db_id": entity.get("rsid").split(":")[1],
+                                }
+                            ]
+                        else:
+                            normalized = []
+
+                        entities_.append(
+                            {
+                                "id": next(uid),
+                                "type": entity["semantic_type_id"],
+                                "text": [entity["text"]],
+                                "normalized": normalized,
+                                "offsets": [entity["offsets"]],
+                            }
+                        )
+                    for passage in document["passages"]:
+                        passage["id"] = next(uid)
+
+                    document["entities"] = entities_
+                    document["relations"] = []
+                    document["events"] = []
+                    document["coreferences"] = []
+
+                    yield document["document_id"], document
+
+    def generate_raw_docs(self, fstream):
+        """
+        Given a filestream, this function yields documents from it
+        """
+        raw_document = []
+        for line in fstream:
+            if line.strip():
+                raw_document.append(line.strip())
+            elif raw_document:
+                yield raw_document
+                raw_document = []
+        if raw_document:
+            yield raw_document
+
+    def parse_raw_doc(self, raw_doc):
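+        # Each raw_doc is a list of stripped lines in PubTator-style format:
+        #   raw_doc[0]: "<pmid>|t|<title>"
+        #   raw_doc[1]: "<pmid>|a|<abstract>"
+        #   raw_doc[2:]: one tab-separated annotation line per mention with fields
+        #     pmid, start, end, mention, semantic_type_id, concept_id[, RSID:<id>]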
+        pmid, _, title = raw_doc[0].split("|")
+        pmid = int(pmid)
+        _, _, abstract = raw_doc[1].split("|")
+
+        if self.config.schema == "source":
+            passages = [
+                {"type": "title", "text": title, "offsets": [0, len(title)]},
+                {
+                    "type": "abstract",
+                    "text": abstract,
+                    "offsets": [len(title) + 1, len(title) + len(abstract) + 1],
+                },
+            ]
+        elif self.config.schema == "bigbio_kb":
+            passages = [
+                {"type": "title", "text": [title], "offsets": [[0, len(title)]]},
+                {
+                    "type": "abstract",
+                    "text": [abstract],
+                    "offsets": [[len(title) + 1, len(title) + len(abstract) + 1]],
+                },
+            ]
+
+        entities = []
+        for count, line in enumerate(raw_doc[2:]):
+            line_pieces = line.split("\t")
+            if len(line_pieces) == 6:
+                if pmid == 18166824 and count == 0:
+                    # this example has the following text
+                    # 18166824 880 948 amino acid (proline) with a polar amino acid (serine) at position 29 p|SUB|P|29|S RSID:2075789
+                    # it is missing the semantic_type_id between `... position 29` and `p|SUB|P|29|S`
+                    pmid_ = str(pmid)
+                    start_idx = "880"
+                    end_idx = "948"
+                    mention = "amino acid (proline) with a polar amino acid (serine) at position 29"
+                    semantic_type_id = "ProteinMutation"
+                    entity_id = "p|SUB|P|29|S"
+                    rsid = "RSID:2075789"
+                    assert line_pieces[0] == pmid_
+                    assert line_pieces[1] == start_idx
+                    assert line_pieces[2] == end_idx
+                    assert line_pieces[3] == mention
+                    assert line_pieces[4] == entity_id
+                    assert line_pieces[5] == rsid
+                    logger.info(
+                        f"Adding ProteinMutation semantic_type_id in Document ID: {pmid} Line: {line}"
+                    )
+                else:
+                    (
+                        pmid_,
+                        start_idx,
+                        end_idx,
+                        mention,
+                        semantic_type_id,
+                        entity_id,
+                    ) = line_pieces
+                    rsid = None
+
+            elif len(line_pieces) == 7:
+                (
+                    pmid_,
+                    start_idx,
+                    end_idx,
+                    mention,
+                    semantic_type_id,
+                    entity_id,
+                    rsid,
+                ) = line_pieces
+
+            else:
+                logger.info(
+                    f"Inconsistent entity format found. Skipping Document ID: {pmid} Line: {line}"
+                )
+                continue
+
+            entity = {
+                "offsets": [int(start_idx), int(end_idx)],
+                "text": mention,
+                "semantic_type_id": semantic_type_id,
+                "concept_id": entity_id,
+                "rsid": rsid,
+            }
+            entities.append(entity)
+
+        return {"pmid": pmid, "passages": passages, "entities": entities}