Dataset: ohsumed
Tasks: Text Classification
Formats: parquet
Sub-tasks: multi-label-classification
Languages: English
Size: 100K - 1M
License:
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
- README.md +1 -0
- ohsumed.py +51 -48
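
The public loading interface is unchanged by this commit; a minimal usage sketch, assuming the dataset id "ohsumed" on the Hub and the default config:

    from datasets import load_dataset

    # Downloads the OHSUMED archive and builds the train split via ohsumed.py.
    dataset = load_dataset("ohsumed", split="train")
    print(dataset[0]["title"])

Because the rewritten script reads examples directly out of the tar archive (see the ohsumed.py diff below), passing streaming=True to load_dataset should work as well.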
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Ohsumed
 annotations_creators:
 - human-annotated
 language_creators:
ohsumed.py
CHANGED
@@ -15,8 +15,6 @@
 """OHSUMED: An Interactive Retrieval Evaluation and New Large Test Collection for Research."""
 
 
-import os
-
 import datasets
 
 
@@ -128,24 +126,27 @@ class Ohsumed(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "ohsu-trec/trec9-train/ohsumed.87"),
-                    "split": "train",
+                    "filepath": "ohsu-trec/trec9-train/ohsumed.87",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "ohsu-trec/trec9-test/ohsumed.88-91"), "split": "test"},
+                gen_kwargs={
+                    "filepath": "ohsu-trec/trec9-test/ohsumed.88-91",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
         # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
         # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
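
This hunk swaps eager extraction for the library's streaming idiom: dl_manager.download fetches the tar archive without unpacking it, dl_manager.iter_archive lazily yields (path, file object) pairs for its members, and filepath becomes an archive-relative path rather than a location on disk. A self-contained sketch of that consumption pattern using plain tarfile; the archive name sample.tar and the member path data/train.txt are invented for illustration:

    import tarfile

    def iter_archive(archive_path):
        # Approximates dl_manager.iter_archive: yields (member_path, file_object)
        # pairs in archive order, without extracting anything to disk.
        with tarfile.open(archive_path) as tar:
            for member in tar:
                if member.isfile():
                    yield member.name, tar.extractfile(member)

    # Select one member by its in-archive path, as _generate_examples does below.
    for path, f in iter_archive("sample.tar"):
        if path == "data/train.txt":
            print(f.readline().decode("utf-8"))
            break

The final hunk rewrites the parsing loop around exactly these (path, file object) pairs: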
@@ -179,44 +180,46 @@ class Ohsumed(datasets.GeneratorBasedBuilder):
             ".S": "source",
         }
 
-        [41 lines of the previous on-disk parsing loop removed; not recoverable from this view]
+        for path, f in files:
+            if path == filepath:
+                data = ohsumed_dict()
+
+                for line in f.readlines():
+                    line = line.decode("utf-8").strip()
+
+                    if line.startswith(".I"):
+                        tag = ".I"
+                        if data["medline_ui"] != -1:
+                            id_ = data["seq_id"] + "_" + data["medline_ui"]
+                            yield id_, {
+                                "seq_id": data["seq_id"],
+                                "medline_ui": data["medline_ui"],
+                                "mesh_terms": str(data["mesh_terms"]),
+                                "title": str(data["title"]),
+                                "publication_type": str(data["publication_type"]),
+                                "abstract": str(data["abstract"]),
+                                "author": str(data["author"]),
+                                "source": str(data["source"]),
+                            }
+                        else:
+                            data = ohsumed_dict()
+                        line = line.replace(".I", "").strip()
+                        data["seq_id"] = line
+                    elif tag and not line.startswith("."):
+                        key = column_map[tag]
+                        data[key] = line
+                    elif ".U" in line:
+                        tag = ".U"
+                    elif ".M" in line:
+                        tag = ".M"
+                    elif ".T" in line:
+                        tag = ".T"
+                    elif ".P" in line:
+                        tag = ".P"
+                    elif ".W" in line:
+                        tag = ".W"
+                    elif ".A" in line:
+                        tag = ".A"
+                    elif ".S" in line:
+                        tag = ".S"
+                break
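
The new loop walks OHSUMED's line-tagged records: a .I line opens a record (and emits the previous one), and each field tag (.U MEDLINE UI, .M MeSH terms, .T title, .P publication type, .W abstract, .A author, .S source) sits on its own line with the field's value on the line that follows. An invented record in that shape, for illustration only:

    .I 1
    .U
    87049087
    .M
    Alanine/blood; Infant, Newborn.
    .T
    An invented article title.
    .W
    An invented one-sentence abstract.

When the next .I line is reached, this record is emitted under the key "1_87049087" (seq_id + "_" + medline_ui), with each tagged value routed through column_map into the example dict.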