Create rsna-2023-abdominal-trauma-detection.py
rsna-2023-abdominal-trauma-detection.py
ADDED
@@ -0,0 +1,321 @@
import pandas as pd
import urllib.parse

import datasets


_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {RSNA 2023 Abdominal Trauma Detection Dataset},
    author = {Hong Jia Herng},
    year = {2023}
}
@misc{rsna-2023-abdominal-trauma-detection,
    author = {Errol Colak, Hui-Ming Lin, Robyn Ball, Melissa Davis, Adam Flanders, Sabeena Jalal, Kirti Magudia, Brett Marinelli, Savvas Nicolaou, Luciano Prevedello, Jeff Rudie, George Shih, Maryam Vazirabad, John Mongan},
    title = {RSNA 2023 Abdominal Trauma Detection},
    publisher = {Kaggle},
    year = {2023},
    url = {https://kaggle.com/competitions/rsna-2023-abdominal-trauma-detection}
}
"""

_DESCRIPTION = """\
This dataset is a preprocessed version of the dataset from the RSNA 2023 Abdominal Trauma Detection Kaggle competition.
It is tailored for segmentation and classification tasks. It contains 3 different configs as described below:
- segmentation: 206 instances where each instance includes a CT scan in NIfTI format, a segmentation mask in NIfTI format, and the relevant metadata (e.g., patient_id, series_id, incomplete_organ, aortic_hu, pixel_representation, bits_allocated, bits_stored)
- classification: 4711 instances where each instance includes a CT scan in NIfTI format, target labels (e.g., extravasation, bowel, kidney, liver, spleen, any_injury), and the relevant metadata (e.g., patient_id, series_id, incomplete_organ, aortic_hu, pixel_representation, bits_allocated, bits_stored)
- classification-with-mask: 206 instances where each instance includes a CT scan in NIfTI format, a segmentation mask in NIfTI format, target labels (e.g., extravasation, bowel, kidney, liver, spleen, any_injury), and the relevant metadata (e.g., patient_id, series_id, incomplete_organ, aortic_hu, pixel_representation, bits_allocated, bits_stored)

All CT scans and segmentation masks have already been resampled to a voxel spacing of (2.0, 2.0, 3.0), which reduces their file size.
"""

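# A minimal usage sketch (illustrative only, not part of the loading script): assuming this
# file sits in the jherng/rsna-2023-abdominal-trauma-detection repo, each config can be
# loaded with `datasets.load_dataset`. The only split generated below is datasets.Split.ALL,
# so it is requested as split="all"; recent versions of the `datasets` library may also
# require trust_remote_code=True for community scripts like this one.
#
#     import datasets
#
#     ds = datasets.load_dataset(
#         "jherng/rsna-2023-abdominal-trauma-detection",
#         name="classification",  # or "segmentation" / "classification-with-mask"
#         split="all",
#     )
#     print(ds[0]["img_path"], ds[0]["metadata"]["series_id"])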
_NAME = "rsna-2023-abdominal-trauma-detection"

_HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"

_LICENSE = "MIT"

_URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/"


class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="segmentation",
            version=VERSION,
            description="This part of the dataset loads the CT scans, segmentation masks, and metadata.",
        ),
        datasets.BuilderConfig(
            name="classification",
            version=VERSION,
            description="This part of the dataset loads the CT scans, target labels, and metadata.",
        ),
        datasets.BuilderConfig(
            name="classification-with-mask",
            version=VERSION,
            description="This part of the dataset loads the CT scans, segmentation masks, target labels, and metadata.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "classification"  # It's not mandatory to have a default configuration; just use one if it makes sense.

    def _info(self):
        if self.config.name == "segmentation":
            features = datasets.Features(
                {
                    "img_path": datasets.Value("string"),
                    "seg_path": datasets.Value("string"),
                    "metadata": {
                        "series_id": datasets.Value("int32"),
                        "patient_id": datasets.Value("int32"),
                        "incomplete_organ": datasets.Value("bool"),
                        "aortic_hu": datasets.Value("float32"),
                        "pixel_representation": datasets.Value("int32"),
                        "bits_allocated": datasets.Value("int32"),
                        "bits_stored": datasets.Value("int32"),
                    },
                }
            )
        elif self.config.name == "classification-with-mask":
            features = datasets.Features(
                {
                    "img_path": datasets.Value("string"),
                    "seg_path": datasets.Value("string"),
                    "bowel": datasets.ClassLabel(
                        num_classes=2, names=["healthy", "injury"]
                    ),
                    "extravasation": datasets.ClassLabel(
                        num_classes=2, names=["healthy", "injury"]
                    ),
                    "kidney": datasets.ClassLabel(
                        num_classes=3, names=["healthy", "low", "high"]
                    ),
                    "liver": datasets.ClassLabel(
                        num_classes=3, names=["healthy", "low", "high"]
                    ),
                    "spleen": datasets.ClassLabel(
                        num_classes=3, names=["healthy", "low", "high"]
                    ),
                    "any_injury": datasets.Value("bool"),
                    "metadata": {
                        "series_id": datasets.Value("int32"),
                        "patient_id": datasets.Value("int32"),
                        "incomplete_organ": datasets.Value("bool"),
                        "aortic_hu": datasets.Value("float32"),
                        "pixel_representation": datasets.Value("int32"),
                        "bits_allocated": datasets.Value("int32"),
                        "bits_stored": datasets.Value("int32"),
                    },
                }
            )
        else:
            features = datasets.Features(
                {
                    "img_path": datasets.Value("string"),
                    "bowel": datasets.ClassLabel(
                        num_classes=2, names=["healthy", "injury"]
                    ),
                    "extravasation": datasets.ClassLabel(
                        num_classes=2, names=["healthy", "injury"]
                    ),
                    "kidney": datasets.ClassLabel(
                        num_classes=3, names=["healthy", "low", "high"]
                    ),
                    "liver": datasets.ClassLabel(
                        num_classes=3, names=["healthy", "low", "high"]
                    ),
                    "spleen": datasets.ClassLabel(
                        num_classes=3, names=["healthy", "low", "high"]
                    ),
                    "any_injury": datasets.Value("bool"),
                    "metadata": {
                        "series_id": datasets.Value("int32"),
                        "patient_id": datasets.Value("int32"),
                        "incomplete_organ": datasets.Value("bool"),
                        "aortic_hu": datasets.Value("float32"),
                        "pixel_representation": datasets.Value("int32"),
                        "bits_allocated": datasets.Value("int32"),
                        "bits_stored": datasets.Value("int32"),
                    },
                }
            )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # segmentation: the 206 segmentation masks and their imgs, plus train_series_meta.csv and train_dicom_tags.parquet
        # classification: all 4711 imgs, plus train.csv, train_series_meta.csv and train_dicom_tags.parquet
        # classification-with-mask: the 206 segmentation masks and their imgs, plus train.csv, train_series_meta.csv and train_dicom_tags.parquet
        series_meta_file = dl_manager.download_and_extract(
            urllib.parse.urljoin(_URL, "train_series_meta.csv")
        )
        dicom_tags_file = dl_manager.download_and_extract(
            urllib.parse.urljoin(_URL, "train_dicom_tags.parquet")
        )
        labels_file = (
            dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train.csv"))
            if self.config.name != "segmentation"
            else None
        )

        series_meta_df = pd.read_csv(series_meta_file)
        if self.config.name in ("classification-with-mask", "segmentation"):
            series_meta_df = series_meta_df.loc[series_meta_df["has_segmentation"] == 1]
            img_files = dl_manager.download(
                series_meta_df.apply(
                    lambda x: urllib.parse.urljoin(
                        _URL, f"train_images/{x['patient_id']}/{x['series_id']}.nii.gz"
                    ),
                    axis=1,
                ).tolist()
            )
            seg_files = dl_manager.download(
                series_meta_df.apply(
                    lambda x: urllib.parse.urljoin(
                        _URL, f"segmentations/{x['series_id']}.nii.gz"
                    ),
                    axis=1,
                ).tolist()
            )
        else:
            img_files = dl_manager.download(
                series_meta_df.apply(
                    lambda x: urllib.parse.urljoin(
                        _URL, f"train_images/{x['patient_id']}/{x['series_id']}.nii.gz"
                    ),
                    axis=1,
                ).tolist()
            )
            seg_files = None

        return [
            datasets.SplitGenerator(
                name=datasets.Split.ALL,
                gen_kwargs={
                    "series_ids": series_meta_df["series_id"].tolist(),
                    "dicom_tags_file": dicom_tags_file,
                    "series_meta_file": series_meta_file,
                    "labels_file": labels_file,
                    "img_files": img_files,
                    "seg_files": seg_files,
                },
            ),
        ]

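    # For reference, the URL templates used in _split_generators assume the following file
    # layout under _URL (taken directly from the f-strings above; no concrete IDs implied):
    #
    #     train.csv
    #     train_series_meta.csv
    #     train_dicom_tags.parquet
    #     train_images/{patient_id}/{series_id}.nii.gz
    #     segmentations/{series_id}.nii.gz
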
    def _generate_examples(
        self,
        series_ids,
        dicom_tags_file,
        series_meta_file,
        labels_file,
        img_files,
        seg_files,
    ):
        series_meta_df = pd.read_csv(series_meta_file)
        # train_dicom_tags.parquet is a Parquet file, so read it with read_parquet.
        dicom_tags_df = pd.read_parquet(dicom_tags_file)[
            [
                "SeriesInstanceUID",
                "PixelRepresentation",
                "BitsAllocated",
                "BitsStored",
            ]
        ]
        # The numeric series_id is taken as the last dot-separated component of the
        # DICOM SeriesInstanceUID; keep one row of tags per series.
        dicom_tags_df["SeriesID"] = dicom_tags_df["SeriesInstanceUID"].apply(
            lambda x: int(x.split(".")[-1])
        )
        dicom_tags_df = dicom_tags_df.drop(labels=["SeriesInstanceUID"], axis=1)
        dicom_tags_df = dicom_tags_df.groupby(by=["SeriesID"], as_index=False).first()
        dicom_tags_df = dicom_tags_df.rename(
            columns={
                "SeriesID": "series_id",
                "PixelRepresentation": "pixel_representation",
                "BitsAllocated": "bits_allocated",
                "BitsStored": "bits_stored",
            }
        )
        series_meta_df = pd.merge(
            left=series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
        )
        labels_df = (
            pd.read_csv(labels_file) if self.config.name != "segmentation" else None
        )

        if self.config.name == "segmentation":
            for key, (series_id, img_path, seg_path) in enumerate(
                zip(series_ids, img_files, seg_files)
            ):
                series_meta = (
                    series_meta_df.loc[series_meta_df["series_id"] == series_id]
                    .iloc[0]
                    .to_dict()
                )
                yield key, {
                    "img_path": img_path,
                    "seg_path": seg_path,
                    "metadata": {
                        "series_id": series_id,
                        "patient_id": series_meta["patient_id"],
                        "incomplete_organ": series_meta["incomplete_organ"],
                        "aortic_hu": series_meta["aortic_hu"],
                        "pixel_representation": series_meta["pixel_representation"],
                        "bits_allocated": series_meta["bits_allocated"],
                        "bits_stored": series_meta["bits_stored"],
                    },
                }

        elif self.config.name == "classification-with-mask":
            for key, (series_id, img_path, seg_path) in enumerate(
                zip(series_ids, img_files, seg_files)
            ):
                series_meta = (
                    series_meta_df.loc[series_meta_df["series_id"] == series_id]
                    .iloc[0]
                    .to_dict()
                )
                patient_id = series_meta["patient_id"]
                label_data = (
                    labels_df.loc[labels_df["patient_id"] == patient_id]
                    .iloc[0]
                    .to_dict()
                )
                # train.csv stores the targets as one-hot columns (e.g. bowel_healthy /
                # bowel_injury, kidney_healthy / kidney_low / kidney_high); convert them
                # to the class indices expected by the ClassLabel features in _info().
                yield key, {
                    "img_path": img_path,
                    "seg_path": seg_path,
                    "bowel": int(label_data["bowel_injury"]),
                    "extravasation": int(label_data["extravasation_injury"]),
                    "kidney": int(label_data["kidney_low"]) + 2 * int(label_data["kidney_high"]),
                    "liver": int(label_data["liver_low"]) + 2 * int(label_data["liver_high"]),
                    "spleen": int(label_data["spleen_low"]) + 2 * int(label_data["spleen_high"]),
                    "any_injury": bool(label_data["any_injury"]),
                    "metadata": {
                        "series_id": series_id,
                        "patient_id": series_meta["patient_id"],
                        "incomplete_organ": series_meta["incomplete_organ"],
                        "aortic_hu": series_meta["aortic_hu"],
                        "pixel_representation": series_meta["pixel_representation"],
                        "bits_allocated": series_meta["bits_allocated"],
                        "bits_stored": series_meta["bits_stored"],
                    },
                }

        else:
            # classification: same as the branch above, just without segmentation masks.
            for key, (series_id, img_path) in enumerate(zip(series_ids, img_files)):
                series_meta = (
                    series_meta_df.loc[series_meta_df["series_id"] == series_id]
                    .iloc[0]
                    .to_dict()
                )
                label_data = (
                    labels_df.loc[labels_df["patient_id"] == series_meta["patient_id"]]
                    .iloc[0]
                    .to_dict()
                )
                yield key, {
                    "img_path": img_path,
                    "bowel": int(label_data["bowel_injury"]),
                    "extravasation": int(label_data["extravasation_injury"]),
                    "kidney": int(label_data["kidney_low"]) + 2 * int(label_data["kidney_high"]),
                    "liver": int(label_data["liver_low"]) + 2 * int(label_data["liver_high"]),
                    "spleen": int(label_data["spleen_low"]) + 2 * int(label_data["spleen_high"]),
                    "any_injury": bool(label_data["any_injury"]),
                    "metadata": {
                        "series_id": series_id,
                        "patient_id": series_meta["patient_id"],
                        "incomplete_organ": series_meta["incomplete_organ"],
                        "aortic_hu": series_meta["aortic_hu"],
                        "pixel_representation": series_meta["pixel_representation"],
                        "bits_allocated": series_meta["bits_allocated"],
                        "bits_stored": series_meta["bits_stored"],
                    },
                }
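
# A small follow-up sketch (illustrative only): after loading the "classification" config as
# shown near the top of this file, the integer values produced for the ClassLabel columns can
# be mapped back to their names with the standard `datasets` ClassLabel API.
#
#     kidney_name = ds.features["kidney"].int2str(ds[0]["kidney"])  # "healthy", "low", or "high"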