Modalities: Text
Languages: English
Libraries: Datasets
License: unknown
gabrielaltay committed
Commit 2c29259 (parent: 49de995)

upload hubscripts/bionlp_st_2019_bb_hub.py to hub from bigbio repo

Files changed (1)
  1. bionlp_st_2019_bb.py +594 -0
bionlp_st_2019_bb.py ADDED
@@ -0,0 +1,594 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List

import datasets

from .bigbiohub import kb_features
from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
# The brat parsing helpers used below are assumed to be bundled in
# bigbiohub alongside the imports above; without these two imports the
# names `brat_parse_to_bigbio_kb` and `remove_prefix` would be undefined.
from .bigbiohub import brat_parse_to_bigbio_kb
from .bigbiohub import remove_prefix

_DATASETNAME = "bionlp_st_2019_bb"
_DISPLAYNAME = "BioNLP 2019 BB"

_SOURCE_VIEW_NAME = "source"
_UNIFIED_VIEW_NAME = "bigbio"

_LANGUAGES = ['English']
_PUBMED = True
_LOCAL = False
_CITATION = """\
@inproceedings{bossy-etal-2019-bacteria,
    title = "Bacteria Biotope at {B}io{NLP} Open Shared Tasks 2019",
    author = "Bossy, Robert and
      Del{\'e}ger, Louise and
      Chaix, Estelle and
      Ba, Mouhamadou and
      N{\'e}dellec, Claire",
    booktitle = "Proceedings of The 5th Workshop on BioNLP Open Shared Tasks",
    month = nov,
    year = "2019",
    address = "Hong Kong, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-5719",
    doi = "10.18653/v1/D19-5719",
    pages = "121--131",
    abstract = "This paper presents the fourth edition of the Bacteria
    Biotope task at BioNLP Open Shared Tasks 2019. The task focuses on
    the extraction of the locations and phenotypes of microorganisms
    from PubMed abstracts and full-text excerpts, and the characterization
    of these entities with respect to reference knowledge sources (NCBI
    taxonomy, OntoBiotope ontology). The task is motivated by the importance
    of the knowledge on biodiversity for fundamental research and applications
    in microbiology. The paper describes the different proposed subtasks, the
    corpus characteristics, and the challenge organization. We also provide an
    analysis of the results obtained by participants, and inspect the evolution
    of the results since the last edition in 2016.",
}
"""

_DESCRIPTION = """\
The task focuses on the extraction of the locations and phenotypes of
microorganisms from PubMed abstracts and full-text excerpts, and the
characterization of these entities with respect to reference knowledge
sources (NCBI taxonomy, OntoBiotope ontology). The task is motivated by
the importance of the knowledge on biodiversity for fundamental research
and applications in microbiology.
"""

_HOMEPAGE = "https://sites.google.com/view/bb-2019/dataset"

_LICENSE = 'License information unavailable'

_URLs = {
    "source": {
        "norm": {
            "train": "https://drive.google.com/uc?export=download&id=1aXbshxgytZ1Dhbmw7OULPFarPO1FbcM3",
            "dev": "https://drive.google.com/uc?export=download&id=14jRZWF8VeluEYrV9EybV3LeGm4q5nH6s",
            "test": "https://drive.google.com/uc?export=download&id=1BPDCFTVMmIlOowYA-DkeNNFjwTfHYPG6",
        },
        "norm+ner": {
            "train": "https://drive.google.com/uc?export=download&id=1yKxBPMej8EYdVeU4QS1xquFfXM76F-2K",
            "dev": "https://drive.google.com/uc?export=download&id=1Xk7h9bax533QWclO3Ur7aS07OATBF_bG",
            "test": "https://drive.google.com/uc?export=download&id=1Cb5hQIPS3LIeUL-UWdqyWfKB52xUz9cp",
        },
        "rel": {
            "train": "https://drive.google.com/uc?export=download&id=1gnc-ScNpssC3qrA7cVox4Iei7i96sYqC",
            "dev": "https://drive.google.com/uc?export=download&id=1wJM9XOfmvIBcX23t9bzQX5fLZwWQJIwS",
            "test": "https://drive.google.com/uc?export=download&id=1smhKA4LEPK5UJEyBLseq0mBaT9REUevu",
        },
        "rel+ner": {
            "train": "https://drive.google.com/uc?export=download&id=1CPx9NxTPQbygqMtxw3d0hNFajhecqgss",
            "dev": "https://drive.google.com/uc?export=download&id=1lVyCCuAJ5TmmTDz4S0dISBNiWGR745_7",
            "test": "https://drive.google.com/uc?export=download&id=1uE8oY5m-7mSA-W-e6vownnAVV97IwHhA",
        },
        "kb": {
            "train": "https://drive.google.com/uc?export=download&id=1Iuce3T_IArXWBbIJ7RXb_STaPnWKQBN-",
            "dev": "https://drive.google.com/uc?export=download&id=14yON_Tc9dm8esWYDVxL-krw23sgTCcdL",
            "test": "https://drive.google.com/uc?export=download&id=1wVqI_t9mirGUk71BkwkcKJv0VNGyaHDs",
        },
        "kb+ner": {
            "train": "https://drive.google.com/uc?export=download&id=1WMl9eD4OZXq8zkkmHp3hSEvAqaAVui6L",
            "dev": "https://drive.google.com/uc?export=download&id=1oOfOfjIfg1XnesXwaKvSDfqgnchuximG",
            "test": "https://drive.google.com/uc?export=download&id=1_dRbgpGJUBCfF-iN2qOAgOBRvYmE7byW",
        },
    },
    "bigbio_kb": {
        "kb+ner": {
            "train": "https://drive.google.com/uc?export=download&id=1WMl9eD4OZXq8zkkmHp3hSEvAqaAVui6L",
            "dev": "https://drive.google.com/uc?export=download&id=1oOfOfjIfg1XnesXwaKvSDfqgnchuximG",
            "test": "https://drive.google.com/uc?export=download&id=1_dRbgpGJUBCfF-iN2qOAgOBRvYmE7byW",
        },
    },
}

_SUPPORTED_TASKS = [
    Tasks.NAMED_ENTITY_RECOGNITION,
    Tasks.NAMED_ENTITY_DISAMBIGUATION,
    Tasks.RELATION_EXTRACTION,
]
_SOURCE_VERSION = "1.0.0"
_BIGBIO_VERSION = "1.0.0"


class bionlp_st_2019_bb(datasets.GeneratorBasedBuilder):
    """This dataset is the fourth edition of the Bacteria
    Biotope task at BioNLP Open Shared Tasks 2019"""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BigBioConfig(
            name="bionlp_st_2019_bb_norm_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2019_bb entity normalization source schema",
            schema="source",
            subset_id="bionlp_st_2019_bb",
        ),
        BigBioConfig(
            name="bionlp_st_2019_bb_norm+ner_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2019_bb entity recognition and normalization source schema",
            schema="source",
            subset_id="bionlp_st_2019_bb",
        ),
        BigBioConfig(
            name="bionlp_st_2019_bb_rel_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2019_bb relation extraction source schema",
            schema="source",
            subset_id="bionlp_st_2019_bb",
        ),
        BigBioConfig(
            name="bionlp_st_2019_bb_rel+ner_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2019_bb entity recognition and relation extraction source schema",
            schema="source",
            subset_id="bionlp_st_2019_bb",
        ),
        BigBioConfig(
            name="bionlp_st_2019_bb_kb_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2019_bb entity normalization and relation extraction source schema",
            schema="source",
            subset_id="bionlp_st_2019_bb",
        ),
        BigBioConfig(
            name="bionlp_st_2019_bb_kb+ner_source",
            version=SOURCE_VERSION,
            description="bionlp_st_2019_bb entity recognition and normalization and relation extraction source schema",
            schema="source",
            subset_id="bionlp_st_2019_bb",
        ),
        BigBioConfig(
            name="bionlp_st_2019_bb_bigbio_kb",
            version=BIGBIO_VERSION,
            description="bionlp_st_2019_bb BigBio schema",
            schema="bigbio_kb",
            subset_id="bionlp_st_2019_bb",
        ),
    ]

    DEFAULT_CONFIG_NAME = "bionlp_st_2019_bb_kb+ner_source"

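    # The trailing pieces of a config name select the subset and schema,
    # e.g. "bionlp_st_2019_bb_norm_source" -> subset "norm", schema "source".
    # `_split_generators` below recovers the subset with
    # `self.config.name.split("_")[-2]` and maps the "bigbio" case to "kb+ner".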
    def _info(self):
        """
        - `features` defines the schema of the parsed data set. The schema depends on the
          chosen `config`: If it is `_SOURCE_VIEW_NAME` the schema is the schema of the
          original data. If `config` is `_UNIFIED_VIEW_NAME`, then the schema is the
          canonical KB-task schema defined in `biomedical/schemas/kb.py`.
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                        {
                            "offsets": datasets.Sequence([datasets.Value("int32")]),
                            "text": datasets.Sequence(datasets.Value("string")),
                            "type": datasets.Value("string"),
                            "id": datasets.Value("string"),
                        }
                    ],
                    "events": [  # E line in brat
                        {
                            "trigger": datasets.Value(
                                "string"
                            ),  # refers to the text_bound_annotation of the trigger,
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "arguments": datasets.Sequence(
                                {
                                    "role": datasets.Value("string"),
                                    "ref_id": datasets.Value("string"),
                                }
                            ),
                        }
                    ],
                    "relations": [  # R line in brat
                        {
                            "id": datasets.Value("string"),
                            "head": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "tail": {
                                "ref_id": datasets.Value("string"),
                                "role": datasets.Value("string"),
                            },
                            "type": datasets.Value("string"),
                        }
                    ],
                    "equivalences": [  # Equiv line in brat
                        {
                            "id": datasets.Value("string"),
                            "ref_ids": datasets.Sequence(datasets.Value("string")),
                        }
                    ],
                    "attributes": [  # M or A lines in brat
                        {
                            "id": datasets.Value("string"),
                            "type": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "value": datasets.Value("string"),
                        }
                    ],
                    "normalizations": [  # N lines in brat
                        {
                            "id": datasets.Value("string"),
                            "ref_id": datasets.Value("string"),
                            "resource_name": datasets.Value(
                                "string"
                            ),  # Name of the resource, e.g. "Wikipedia"
                            "cuid": datasets.Value(
                                "string"
                            ),  # ID in the resource, e.g. 534366
                        }
                    ],
                },
            )
        elif self.config.schema == "bigbio_kb":
            features = kb_features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        version = self.config.name.split("_")[-2]
        if version == "bigbio":
            version = "kb+ner"
        my_urls = _URLs[self.config.schema][version]
        data_files = {
            "train": Path(dl_manager.download_and_extract(my_urls["train"]))
            / f"BioNLP-OST-2019_BB-{version}_train",
            "dev": Path(dl_manager.download_and_extract(my_urls["dev"]))
            / f"BioNLP-OST-2019_BB-{version}_dev",
            "test": Path(dl_manager.download_and_extract(my_urls["test"]))
            / f"BioNLP-OST-2019_BB-{version}_test",
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_files": data_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_files": data_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_files": data_files["test"]},
            ),
        ]

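    # Each subset archive is expected to unpack into a directory named
    # "BioNLP-OST-2019_BB-<subset>_<split>" containing brat ".txt" files plus
    # their ".a1"/".a2" annotation files; `_generate_examples` below globs the
    # ".txt" files and pairs each one with its annotations.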
    def _generate_examples(self, data_files: Path):
        if self.config.schema == "source":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = self.parse_brat_file(txt_file)
                example["id"] = str(guid)
                yield guid, example
        elif self.config.schema == "bigbio_kb":
            txt_files = list(data_files.glob("*txt"))
            for guid, txt_file in enumerate(txt_files):
                example = brat_parse_to_bigbio_kb(
                    self.parse_brat_file(txt_file)
                )
                example["id"] = str(guid)
                yield guid, example
        else:
            raise ValueError(f"Invalid config: {self.config.name}")

    def parse_brat_file(
        self,
        txt_file: Path,
        annotation_file_suffixes: List[str] = None,
        parse_notes: bool = False,
    ) -> Dict:
        """
        Parse a brat file into the schema defined below.
        `txt_file` should be the path to the brat '.txt' file you want to parse, e.g. 'data/1234.txt'
        Assumes that the annotations are contained in one or more of the corresponding '.a1', '.a2' or '.ann' files,
        e.g. 'data/1234.ann' or 'data/1234.a1' and 'data/1234.a2'.

        Will include annotator notes, when `parse_notes == True`.

        brat_features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "document_id": datasets.Value("string"),
                "text": datasets.Value("string"),
                "text_bound_annotations": [  # T line in brat, e.g. type or event trigger
                    {
                        "offsets": datasets.Sequence([datasets.Value("int32")]),
                        "text": datasets.Sequence(datasets.Value("string")),
                        "type": datasets.Value("string"),
                        "id": datasets.Value("string"),
                    }
                ],
                "events": [  # E line in brat
                    {
                        "trigger": datasets.Value(
                            "string"
                        ),  # refers to the text_bound_annotation of the trigger,
                        "id": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "arguments": datasets.Sequence(
                            {
                                "role": datasets.Value("string"),
                                "ref_id": datasets.Value("string"),
                            }
                        ),
                    }
                ],
                "relations": [  # R line in brat
                    {
                        "id": datasets.Value("string"),
                        "head": {
                            "ref_id": datasets.Value("string"),
                            "role": datasets.Value("string"),
                        },
                        "tail": {
                            "ref_id": datasets.Value("string"),
                            "role": datasets.Value("string"),
                        },
                        "type": datasets.Value("string"),
                    }
                ],
                "equivalences": [  # Equiv line in brat
                    {
                        "id": datasets.Value("string"),
                        "ref_ids": datasets.Sequence(datasets.Value("string")),
                    }
                ],
                "attributes": [  # M or A lines in brat
                    {
                        "id": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "ref_id": datasets.Value("string"),
                        "value": datasets.Value("string"),
                    }
                ],
                "normalizations": [  # N lines in brat
                    {
                        "id": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "ref_id": datasets.Value("string"),
                        "resource_name": datasets.Value(
                            "string"
                        ),  # Name of the resource, e.g. "Wikipedia"
                        "cuid": datasets.Value(
                            "string"
                        ),  # ID in the resource, e.g. 534366
                        "text": datasets.Value(
                            "string"
                        ),  # Human readable description/name of the entity, e.g. "Barack Obama"
                    }
                ],
                ### OPTIONAL: Only included when `parse_notes == True`
                "notes": [  # # lines in brat
                    {
                        "id": datasets.Value("string"),
                        "type": datasets.Value("string"),
                        "ref_id": datasets.Value("string"),
                        "text": datasets.Value("string"),
                    }
                ],
            },
        )
        """

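        # For orientation, a BB-style annotation file mixes entries such as
        # (schematic examples, not verbatim corpus lines):
        #   T1\tMicroorganism 15 32\tBacillus subtilis
        #   R1\tLives_In Microorganism:T1 Location:T2
        #   N1\tOntoBiotope Annotation:T2 Referent:OBT:002187
        # which the loop below routes into "text_bound_annotations",
        # "relations" and "normalizations" respectively.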
        example = {}
        example["document_id"] = txt_file.with_suffix("").name
        with txt_file.open(encoding="utf-8") as f:
            if self.config.schema == "bigbio_kb":
                example["text"] = f.read().replace("\u00A0", " ").replace("\n", " ")
            else:
                example["text"] = f.read()

        # If no specific suffixes of the to-be-read annotation files are given - take standard suffixes
        # for event extraction
        if annotation_file_suffixes is None:
            annotation_file_suffixes = [".a1", ".a2", ".ann"]

        if len(annotation_file_suffixes) == 0:
            raise AssertionError(
                "At least one suffix for the to-be-read annotation files should be given!"
            )

        ann_lines = []
        for suffix in annotation_file_suffixes:
            annotation_file = txt_file.with_suffix(suffix)
            if annotation_file.exists():
                with annotation_file.open(encoding="utf8") as f:
                    ann_lines.extend(f.readlines())

        example["text_bound_annotations"] = []
        example["events"] = []
        example["relations"] = []
        example["equivalences"] = []
        example["attributes"] = []
        example["normalizations"] = []

        if parse_notes:
            example["notes"] = []

        for line in ann_lines:
            line = line.strip()
            if not line:
                continue

            if line.startswith("T"):  # Text bound
                ann = {}
                fields = line.split("\t")
                ann["id"] = fields[0]
                ann["type"] = fields[1].split()[0]
                if ann["type"] in ["Title", "Paragraph"]:
                    continue
                ann["offsets"] = []
                span_str = remove_prefix(fields[1], (ann["type"] + " "))
                text = fields[2]
                for span in span_str.split(";"):
                    start, end = span.split()
                    ann["offsets"].append([int(start), int(end)])

                # Heuristically split text of discontiguous entities into chunks
                ann["text"] = []
                if len(ann["offsets"]) > 1:
                    i = 0
                    for start, end in ann["offsets"]:
                        chunk_len = end - start
                        if self.config.schema == "bigbio_kb":
                            ann["text"].append(
                                text[i : chunk_len + i].replace("\u00A0", " ")
                            )
                        else:
                            ann["text"].append(text[i : chunk_len + i])
                        i += chunk_len
                        while i < len(text) and text[i] == " ":
                            i += 1
                else:
                    if self.config.schema == "bigbio_kb":
                        ann["text"] = [text.replace("\u00A0", " ")]
                    else:
                        ann["text"] = [text]

                example["text_bound_annotations"].append(ann)

            elif line.startswith("E"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]

                ann["type"], ann["trigger"] = fields[1].split()[0].split(":")

                ann["arguments"] = []
                for role_ref_id in fields[1].split()[1:]:
                    argument = {
                        "role": (role_ref_id.split(":"))[0],
                        "ref_id": (role_ref_id.split(":"))[1],
                    }
                    ann["arguments"].append(argument)

                example["events"].append(ann)

            elif line.startswith("R"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["type"] = fields[1].split()[0]

                ann["head"] = {
                    "role": fields[1].split()[1].split(":")[0],
                    "ref_id": fields[1].split()[1].split(":")[1],
                }
                ann["tail"] = {
                    "role": fields[1].split()[2].split(":")[0],
                    "ref_id": fields[1].split()[2].split(":")[1],
                }

                example["relations"].append(ann)

            # '*' seems to be the legacy way to mark equivalences,
            # but I couldn't find any info on the current way
            # this might have to be adapted dependent on the brat version
            # of the annotation
            elif line.startswith("*"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["ref_ids"] = fields[1].split()[1:]

                example["equivalences"].append(ann)

            elif line.startswith("A") or line.startswith("M"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]

                info = fields[1].split()
                ann["type"] = info[0]
                ann["ref_id"] = info[1]

                if len(info) > 2:
                    ann["value"] = info[2]
                else:
                    ann["value"] = ""

                example["attributes"].append(ann)

            elif line.startswith("N"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]

                info = fields[1].split()

                ann["ref_id"] = info[1].split(":")[-1]
                ann["resource_name"] = info[0]
                ann["cuid"] = "".join(info[2].split(":")[1:])
                example["normalizations"].append(ann)

            elif parse_notes and line.startswith("#"):
                ann = {}
                fields = line.split("\t")

                ann["id"] = fields[0]
                ann["text"] = fields[2]

                info = fields[1].split()

                ann["type"] = info[0]
                ann["ref_id"] = info[1]
                example["notes"].append(ann)
        return example
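
As a quick smoke test, a script along these lines should exercise the loader once the file is on the Hub. The repo id shown is an assumption (the usual bigbio namespace), and newer releases of the datasets library additionally require trust_remote_code=True for script-based datasets:

import datasets

# Hypothetical repo id -- adjust to wherever this script is actually hosted.
ds = datasets.load_dataset(
    "bigbio/bionlp_st_2019_bb",
    name="bionlp_st_2019_bb_bigbio_kb",
    trust_remote_code=True,  # required by newer `datasets` versions for loading scripts
)
print(ds["train"][0]["document_id"])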