wzkariampuzha commited on
Commit
9ca744f
1 Parent(s): 0e88ed2

Update EpiSet4NER-v2.py

Browse files
Files changed (1) hide show
  1. EpiSet4NER-v2.py +94 -33
EpiSet4NER-v2.py CHANGED
@@ -1,5 +1,5 @@
1
  # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
@@ -14,61 +14,122 @@
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
- """EpiClassify4GARD dataset."""
18
 
 
19
 
20
- import csv
21
  import datasets
22
- from datasets.tasks import TextClassification
23
 
24
 
25
- _DESCRIPTION = """\
26
- INSERT DESCRIPTION
27
- """
28
  _CITATION = """\
29
- John JN, Sid E, Zhu Q. Recurrent Neural Networks to Automatically Identify Rare Disease Epidemiologic Studies from PubMed. AMIA Jt Summits Transl Sci Proc. 2021 May 17;2021:325-334. PMID: 34457147; PMCID: PMC8378621.
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  """
31
 
32
- _TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/ncats/GARD_EpiSet4TextClassification/raw/main/train_short.tsv"
33
- _VAL_DOWNLOAD_URL = "https://huggingface.co/datasets/ncats/GARD_EpiSet4TextClassification/raw/main/val_short.tsv"
34
- _TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/ncats/GARD_EpiSet4TextClassification/raw/main/test.tsv"
 
35
 
36
 
37
- class EpiClassify4GARD(datasets.GeneratorBasedBuilder):
38
- """EpiClassify4GARD text classification dataset."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
  def _info(self):
41
  return datasets.DatasetInfo(
42
  description=_DESCRIPTION,
43
  features=datasets.Features(
44
  {
45
- "abstract": datasets.Value("string"),
46
- "label": datasets.features.ClassLabel(names=["1 = IsEpi", "0 = IsNotEpi"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  }
48
  ),
49
- homepage="https://github.com/ncats/epi4GARD/tree/master/Epi4GARD#epi4gard",
 
50
  citation=_CITATION,
51
- task_templates=[TextClassification(text_column="abstract", label_column="label")],
52
  )
53
 
54
  def _split_generators(self, dl_manager):
55
- train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
56
- val_path = dl_manager.download_and_extract(_VAL_DOWNLOAD_URL)
57
- test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
 
 
 
 
 
58
  return [
59
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
60
- datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_path }),
61
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
62
  ]
63
 
64
  def _generate_examples(self, filepath):
65
- """Generate examples."""
66
- with open(filepath, encoding="utf-8") as csv_file:
67
- csv_reader = csv.reader(
68
- csv_file, quotechar='"', delimiter="\t", quoting=csv.QUOTE_ALL, skipinitialspace=True
69
- )
70
- next(csv_reader)
71
- for id_, row in enumerate(csv_reader):
72
- abstract = row[0]
73
- label = row[1]
74
- yield id_, {"abstract": abstract, "label": int(label)}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
 
14
  # limitations under the License.
15
 
16
  # Lint as: python3
17
+ """EpiSet4NER: a token-level named-entity-recognition dataset of rare-disease epidemiology abstracts."""
18
 
19
+ import logging
20
 
 
21
  import datasets
 
22
 
23
 
 
 
 
24
  _CITATION = """\
25
+ *REDO*
26
+ @inproceedings{wang2019crossweigh,
27
+ title={CrossWeigh: Training Named Entity Tagger from Imperfect Annotations},
28
+ author={Wang, Zihan and Shang, Jingbo and Liu, Liyuan and Lu, Lihao and Liu, Jiacheng and Han, Jiawei},
29
+ booktitle={Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)},
30
+ pages={5157--5166},
31
+ year={2019}
32
+ }
33
+ """
34
+
35
+ _DESCRIPTION = """\
36
+ **REWRITE*
37
+ EpiSet4NER is a dataset generated from 620 rare disease abstracts labeled using statistical and rule-base methods. The test set was then manually corrected by a rare disease expert.
38
+ For more details see *INSERT PAPER* and https://github.com/ncats/epi4GARD/tree/master/EpiExtract4GARD#epiextract4gard
39
  """
40
 
41
+ _URL = "https://github.com/NCATS/epi4GARD/raw/master/EpiExtract4GARD/datasets/EpiCustomV3/"
42
+ _TRAINING_FILE = "train.tsv"
43
+ _VAL_FILE = "val.tsv"
44
+ _TEST_FILE = "test.tsv"
45
 
46
 
47
class EpiSetConfig(datasets.BuilderConfig):
    """BuilderConfig for EpiSet4NER."""

    def __init__(self, **kwargs):
        """BuilderConfig for EpiSet4NER.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(EpiSetConfig, self).__init__(**kwargs)
56
+
57
+
58
class EpiSet(datasets.GeneratorBasedBuilder):
    """EpiSet4NER by GARD."""

    BUILDER_CONFIGS = [
        EpiSetConfig(
            name="EpiSet4NER",
            version=datasets.Version("3.2.1"),
            description="EpiSet4NER by NIH NCATS GARD",
        ),
    ]
64
 
65
  def _info(self):
66
  return datasets.DatasetInfo(
67
  description=_DESCRIPTION,
68
  features=datasets.Features(
69
  {
70
+ "id": datasets.Value("string"),
71
+ "tokens": datasets.Sequence(datasets.Value("string")),
72
+ "ner_tags": datasets.Sequence(
73
+ datasets.features.ClassLabel(
74
+ names=[
75
+ "O", #(0)
76
+ "B-LOC", #(1)
77
+ "I-LOC", #(2)
78
+ "B-EPI", #(3)
79
+ "I-EPI", #(4)
80
+ "B-STAT", #(5)
81
+ "I-STAT", #(6)
82
+ ]
83
+ )
84
+ ),
85
  }
86
  ),
87
+ supervised_keys=None,
88
+ homepage="https://github.com/ncats/epi4GARD/tree/master/EpiExtract4GARD#epiextract4gard",
89
  citation=_CITATION,
 
90
  )
91
 
92
  def _split_generators(self, dl_manager):
93
+ """Returns SplitGenerators."""
94
+ urls_to_download = {
95
+ "train": f"{_URL}{_TRAINING_FILE}",
96
+ "val": f"{_URL}{_VAL_FILE}",
97
+ "test": f"{_URL}{_TEST_FILE}",
98
+ }
99
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
100
+
101
  return [
102
+ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
103
+ datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["val"]}),
104
+ datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
105
  ]
106
 
107
  def _generate_examples(self, filepath):
108
+ logging.info(" Generating examples from = %s", filepath)
109
+ with open(filepath, encoding="utf-8") as f:
110
+ guid = 0
111
+ tokens = []
112
+ ner_tags = []
113
+ for line in f:
114
+ if line.startswith("-DOCSTART-") or line == "" or line == "\n":
115
+ if tokens:
116
+ yield guid, {
117
+ "id": str(guid),
118
+ "tokens": tokens,
119
+ "ner_tags": ner_tags,
120
+ }
121
+ guid += 1
122
+ tokens = []
123
+ ner_tags = []
124
+ else:
125
+ # EpiSet tokens are space separated
126
+ splits = line.split("\t")
127
+ tokens.append(splits[0])
128
+ ner_tags.append(splits[1].rstrip())
129
+ # last example
130
+ if tokens:
131
+ yield guid, {
132
+ "id": str(guid),
133
+ "tokens": tokens,
134
+ "ner_tags": ner_tags,
135
+ }