jherng committed on
Commit
48c87c4
·
1 Parent(s): b3137bb

Update rsna-2023-abdominal-trauma-detection.py

Browse files
Files changed (1) hide show
  1. rsna-2023-abdominal-trauma-detection.py +343 -213
rsna-2023-abdominal-trauma-detection.py CHANGED
@@ -1,278 +1,408 @@
1
- import urllib.parse
2
-
3
- import datasets
4
  import pandas as pd
5
- import requests
 
 
6
 
7
  _CITATION = """\
8
- @inproceedings{Wu2020not,
9
- title={Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision},
10
- author={Wu, Peng and Liu, jing and Shi, Yujia and Sun, Yujia and Shao, Fangtao and Wu, Zhaoyang and Yang, Zhiwei},
11
- booktitle={European Conference on Computer Vision (ECCV)},
12
- year={2020}
 
 
 
 
 
 
13
  }
14
  """
15
 
16
  _DESCRIPTION = """\
17
- Dataset for the paper "Not only Look, but also Listen: Learning Multimodal Violence Detection under Weak Supervision". \
18
- The dataset is downloaded from the authors' website (https://roc-ng.github.io/XD-Violence/). Hosting this dataset on HuggingFace \
19
- is just to make it easier for my own project to use this dataset. Please cite the original paper if you use this dataset.
 
 
 
 
20
  """
21
 
22
- _NAME = "xd-violence"
23
 
24
  _HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"
25
 
26
  _LICENSE = "MIT"
27
 
28
- _URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/data/"
29
 
30
 
31
- class XDViolenceConfig(datasets.BuilderConfig):
32
  def __init__(self, **kwargs):
33
- """BuilderConfig for XD-Violence.
34
- Args:
35
- **kwargs: keyword arguments forwarded to super.
36
- """
37
- super(XDViolenceConfig, self).__init__(**kwargs)
38
 
 
 
39
 
40
- class XDViolence(datasets.GeneratorBasedBuilder):
41
  BUILDER_CONFIGS = [
42
- XDViolenceConfig(
43
- name="video",
44
- description="Video dataset",
 
 
 
 
 
 
45
  ),
46
- XDViolenceConfig(
47
- name="rgb",
48
- description="RGB visual features of the video dataset",
 
49
  ),
50
  ]
51
 
52
- DEFAULT_CONFIG_NAME = "video"
53
- BUILDER_CONFIG_CLASS = XDViolenceConfig
54
-
55
- CODE2LABEL = {
56
- "A": "Normal",
57
- "B1": "Fighting",
58
- "B2": "Shooting",
59
- "B4": "Riot",
60
- "B5": "Abuse",
61
- "B6": "Car accident",
62
- "G": "Explosion",
63
- }
64
-
65
- LABEL2IDX = {
66
- "Normal": 0,
67
- "Fighting": 1,
68
- "Shooting": 2,
69
- "Riot": 3,
70
- "Abuse": 4,
71
- "Car accident": 5,
72
- "Explosion": 6,
73
- }
74
 
75
  def _info(self):
76
- if self.config.name == "rgb":
77
  features = datasets.Features(
78
  {
79
- "id": datasets.Value("string"),
80
- "rgb_feats": datasets.Array3D(
81
- shape=(None, 5, 2048),
82
- dtype="float32", # (num_frames, num_crops, feature_dim) use 5 crops by default as of now
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  ),
84
- "binary_target": datasets.ClassLabel(
85
- names=["Non-violence", "Violence"]
86
  ),
87
- "multilabel_target": datasets.Sequence(
88
- datasets.ClassLabel(
89
- names=[
90
- "Normal",
91
- "Fighting",
92
- "Shooting",
93
- "Riot",
94
- "Abuse",
95
- "Car accident",
96
- "Explosion",
97
- ]
98
- )
99
  ),
100
- "frame_annotations": datasets.Sequence(
101
- {
102
- "start": datasets.Value("int32"),
103
- "end": datasets.Value("int32"),
104
- }
105
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
106
  }
107
  )
108
- else: # default = "video"
 
109
  features = datasets.Features(
110
  {
111
- "id": datasets.Value("string"),
112
- "path": datasets.Value("string"),
113
- "binary_target": datasets.ClassLabel(
114
- names=["Non-violence", "Violence"]
115
  ),
116
- "multilabel_target": datasets.Sequence(
117
- datasets.ClassLabel(
118
- names=[
119
- "Normal",
120
- "Fighting",
121
- "Shooting",
122
- "Riot",
123
- "Abuse",
124
- "Car accident",
125
- "Explosion",
126
- ]
127
- )
128
  ),
129
- "frame_annotations": datasets.Sequence(
130
- {
131
- "start": datasets.Value("int32"),
132
- "end": datasets.Value("int32"),
133
- }
134
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
  }
136
  )
137
 
138
  return datasets.DatasetInfo(
139
- features=features,
140
  description=_DESCRIPTION,
 
141
  homepage=_HOMEPAGE,
142
  license=_LICENSE,
143
  citation=_CITATION,
144
  )
145
 
146
  def _split_generators(self, dl_manager):
147
- if self.config.name == "rgb":
148
- raise NotImplementedError("rgb not implemented yet")
149
- else:
150
- # Download train and test list files
151
- list_paths = {
152
- "train": dl_manager.download_and_extract(
153
- urllib.parse.urljoin(_URL, "train_list.txt")
154
- ),
155
- "test": dl_manager.download_and_extract(
156
- urllib.parse.urljoin(_URL, "test_list.txt")
157
- ),
158
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159
 
160
- # Download test annotation file
161
- annotation_path = dl_manager.download_and_extract(
162
- urllib.parse.urljoin(_URL, "test_annotations.txt")
 
 
163
  )
164
 
165
- # Download videos
166
- video_urls = {
167
- "train": pd.read_csv(
168
- list_paths["train"],
169
- header=None,
170
- sep=" ",
171
- usecols=[0],
172
- names=["id"],
173
- )["id"]
174
- .apply(
175
- lambda x: urllib.parse.quote(
176
- urllib.parse.urljoin(_URL, f"video/{x.split('.mp4')[0]}.mp4"),
177
- safe=":/",
178
- )
179
- )
180
- .to_list(),
181
- "test": pd.read_csv(
182
- list_paths["test"],
183
- header=None,
184
- sep=" ",
185
- usecols=[0],
186
- names=["id"],
187
- )["id"]
188
- .apply(
189
- lambda x: urllib.parse.quote(
190
- urllib.parse.urljoin(_URL, f"video/{x.split('.mp4')[0]}.mp4"),
191
- safe=":/",
192
- )
193
- )
194
- .to_list(),
195
- }
196
 
197
- video_paths = {
198
- "train": dl_manager.download(video_urls["train"]),
199
- "test": dl_manager.download(video_urls["test"]),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
  }
 
 
 
 
201
 
202
- # Function to read annotations
203
- annotation_readers = {
204
- "train": self._read_list,
205
- "test": self._read_test_annotations,
206
- }
 
 
207
 
208
- return [
209
- datasets.SplitGenerator(
210
- name=datasets.Split.TRAIN,
211
- gen_kwargs={
212
- "list_path": list_paths["train"],
213
- "frame_annotation_path": None,
214
- "video_paths": video_paths["train"],
215
- "annotation_reader": annotation_readers["train"],
216
- },
217
- ),
218
- datasets.SplitGenerator(
219
- name=datasets.Split.TEST,
220
- gen_kwargs={
221
- "list_path": list_paths["test"],
222
- "frame_annotation_path": annotation_path,
223
- "video_paths": video_paths["test"],
224
- "annotation_reader": annotation_readers["test"],
225
- },
226
- ),
227
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
 
229
  def _generate_examples(
230
- self, list_path, frame_annotation_path, video_paths, annotation_reader
 
231
  ):
232
- if self.config.name == "rgb":
233
- raise NotImplementedError("rgb not implemented yet")
234
- else:
235
- ann_data = annotation_reader(list_path, frame_annotation_path)
 
 
 
 
 
 
 
 
 
 
 
236
 
237
- for key, (path, annotation) in enumerate(zip(video_paths, ann_data)):
238
- id = annotation["id"]
239
- binary = annotation["binary_target"]
240
- multilabel = annotation["multilabel_target"]
241
- frame_annotations = annotation.get("frame_annotations", [])
 
 
 
 
242
 
243
- yield (
244
- key,
245
- {
246
- "id": id,
247
- "path": path,
248
- "binary_target": binary,
249
- "multilabel_target": multilabel,
250
- "frame_annotations": frame_annotations,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
251
  },
 
 
 
 
 
 
 
 
 
252
  )
253
 
254
- @staticmethod
255
- def _read_list(list_path, frame_annotation_path):
256
- file_list = pd.read_csv(
257
- list_path, header=None, sep=" ", usecols=[0], names=["id"]
258
- )
259
- file_list["id"] = file_list["id"].apply(
260
- lambda x: x.split("/")[1].split(".mp4")[0]
261
- )
262
- file_list["binary_target"], file_list["multilabel_target"] = zip(
263
- *file_list["id"].apply(XDViolence._extract_labels)
264
- )
265
-
266
- return file_list.to_dict("records")
267
-
268
- @classmethod
269
- def _extract_labels(cls, video_id):
270
- """Extracts labels from the video id."""
271
- codes = video_id.split("_")[-1].split(".mp4")[0].split("-")
272
-
273
- binary = 1 if len(codes) > 1 else 0
274
- multilabel = [
275
- cls.LABEL2IDX[cls.CODE2LABEL[code]] for code in codes if code != "0"
276
- ]
277
-
278
- return binary, multilabel
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import urllib.parse

import datasets
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
6
+
7
 
8
# BibTeX entries: one for this preprocessed HF dataset, one for the original
# Kaggle competition the data derives from.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {RSNA 2023 Abdominal Trauma Detection Dataset},
author={Hong Jia Herng},
year={2023}
}
@misc{rsna-2023-abdominal-trauma-detection,
author = {Errol Colak, Hui-Ming Lin, Robyn Ball, Melissa Davis, Adam Flanders, Sabeena Jalal, Kirti Magudia, Brett Marinelli, Savvas Nicolaou, Luciano Prevedello, Jeff Rudie, George Shih, Maryam Vazirabad, John Mongan},
title = {RSNA 2023 Abdominal Trauma Detection},
publisher = {Kaggle},
year = {2023},
url = {https://kaggle.com/competitions/rsna-2023-abdominal-trauma-detection}
}
"""

_DESCRIPTION = """\
This dataset is the preprocessed version of the dataset from RSNA 2023 Abdominal Trauma Detection Kaggle Competition.
It is tailored for segmentation and classification tasks. It contains 3 different configs as described below:
- segmentation: 206 instances where each instance includes a CT scan in NIfTI format, a segmentation mask in NIfTI format, and its relevant metadata (e.g., patient_id, series_id, incomplete_organ, aortic_hu, pixel_representation, bits_allocated, bits_stored)
- classification: 4711 instances where each instance includes a CT scan in NIfTI format, target labels (e.g., extravasation, bowel, kidney, liver, spleen, any_injury), and its relevant metadata (e.g., patient_id, series_id, incomplete_organ, aortic_hu, pixel_representation, bits_allocated, bits_stored)
- classification-with-mask: 206 instances where each instance includes a CT scan in NIfTI format, a segmentation mask in NIfTI format, target labels (e.g., extravasation, bowel, kidney, liver, spleen, any_injury), and its relevant metadata (e.g., patient_id, series_id, incomplete_organ, aortic_hu, pixel_representation, bits_allocated, bits_stored)

All CT scans and segmentation masks had already been resampled with voxel spacing (2.0, 2.0, 3.0) and thus its reduced file size.
"""

# Name of the dataset repository on the Hugging Face Hub.
_NAME = "rsna-2023-abdominal-trauma-detection"

_HOMEPAGE = f"https://huggingface.co/datasets/jherng/{_NAME}"

_LICENSE = "MIT"

# Base URL that every data file (CSVs, parquet, NIfTI volumes) is resolved
# against with urllib.parse.urljoin.
_URL = f"https://huggingface.co/datasets/jherng/{_NAME}/resolve/main/"
40
 
41
 
42
class RSNA2023AbdominalTraumaDetectionConfig(datasets.BuilderConfig):
    """BuilderConfig for the RSNA 2023 Abdominal Trauma Detection dataset.

    Extends ``datasets.BuilderConfig`` with two extra keyword arguments used
    by the builder's train/test split:

    Args:
        test_size: Fraction of series held out for the test split
            (default 0.1).
        random_state: Seed passed to ``train_test_split`` so the split is
            reproducible (default 42).
        **kwargs: Remaining keyword arguments forwarded to
            ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        # Pop the custom keys before delegating: BuilderConfig.__init__ does
        # not accept unknown keyword arguments.
        self.test_size = kwargs.pop("test_size", 0.1)
        self.random_state = kwargs.pop("random_state", 42)
        super().__init__(**kwargs)
 
48
 
49
class RSNA2023AbdominalTraumaDetection(datasets.GeneratorBasedBuilder):
    """Builder for the preprocessed RSNA 2023 Abdominal Trauma Detection data.

    Three configs are exposed: "segmentation" (scan + mask + metadata),
    "classification" (scan + injury labels + metadata), and
    "classification-with-mask" (scan + mask + injury labels + metadata).
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        RSNA2023AbdominalTraumaDetectionConfig(
            name="segmentation",
            version=VERSION,
            description="This part of the dataset loads the CT scans, segmentation masks, and metadata.",
        ),
        RSNA2023AbdominalTraumaDetectionConfig(
            name="classification",
            version=VERSION,
            description="This part of the dataset loads the CT scans, target labels, and metadata.",
        ),
        RSNA2023AbdominalTraumaDetectionConfig(
            name="classification-with-mask",
            version=VERSION,
            description="This part of the dataset loads the CT scans, segmentation masks, target labels, and metadata.",
        ),
    ]

    # Config used when the caller does not pass one explicitly.
    DEFAULT_CONFIG_NAME = "classification"
    BUILDER_CONFIG_CLASS = RSNA2023AbdominalTraumaDetectionConfig
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  def _info(self):
74
+ if self.config.name == "segmentation":
75
  features = datasets.Features(
76
  {
77
+ "img_path": datasets.Value("string"),
78
+ "seg_path": datasets.Value("string"),
79
+ "metadata": {
80
+ "series_id": datasets.Value("int32"),
81
+ "patient_id": datasets.Value("int32"),
82
+ "incomplete_organ": datasets.Value("bool"),
83
+ "aortic_hu": datasets.Value("float32"),
84
+ "pixel_representation": datasets.Value("int32"),
85
+ "bits_allocated": datasets.Value("int32"),
86
+ "bits_stored": datasets.Value("int32"),
87
+ },
88
+ }
89
+ )
90
+ elif self.config.name == "classification-with-mask":
91
+ features = datasets.Features(
92
+ {
93
+ "img_path": datasets.Value("string"),
94
+ "seg_path": datasets.Value("string"),
95
+ "bowel": datasets.ClassLabel(
96
+ num_classes=2, names=["healthy", "injury"]
97
  ),
98
+ "extravasation": datasets.ClassLabel(
99
+ num_classes=2, names=["healthy", "injury"]
100
  ),
101
+ "kidney": datasets.ClassLabel(
102
+ num_classes=3, names=["healthy", "low", "high"]
 
 
 
 
 
 
 
 
 
 
103
  ),
104
+ "liver": datasets.ClassLabel(
105
+ num_classes=3, names=["healthy", "low", "high"]
 
 
 
106
  ),
107
+ "spleen": datasets.ClassLabel(
108
+ num_classes=3, names=["healthy", "low", "high"]
109
+ ),
110
+ "any_injury": datasets.Value("bool"),
111
+ "metadata": {
112
+ "series_id": datasets.Value("int32"),
113
+ "patient_id": datasets.Value("int32"),
114
+ "incomplete_organ": datasets.Value("bool"),
115
+ "aortic_hu": datasets.Value("float32"),
116
+ "pixel_representation": datasets.Value("int32"),
117
+ "bits_allocated": datasets.Value("int32"),
118
+ "bits_stored": datasets.Value("int32"),
119
+ },
120
  }
121
  )
122
+
123
+ else:
124
  features = datasets.Features(
125
  {
126
+ "img_path": datasets.Value("string"),
127
+ "bowel": datasets.ClassLabel(
128
+ num_classes=2, names=["healthy", "injury"]
 
129
  ),
130
+ "extravasation": datasets.ClassLabel(
131
+ num_classes=2, names=["healthy", "injury"]
 
 
 
 
 
 
 
 
 
 
132
  ),
133
+ "kidney": datasets.ClassLabel(
134
+ num_classes=3, names=["healthy", "low", "high"]
 
 
 
135
  ),
136
+ "liver": datasets.ClassLabel(
137
+ num_classes=3, names=["healthy", "low", "high"]
138
+ ),
139
+ "spleen": datasets.ClassLabel(
140
+ num_classes=3, names=["healthy", "low", "high"]
141
+ ),
142
+ "any_injury": datasets.Value("bool"),
143
+ "metadata": {
144
+ "series_id": datasets.Value("int32"),
145
+ "patient_id": datasets.Value("int32"),
146
+ "incomplete_organ": datasets.Value("bool"),
147
+ "aortic_hu": datasets.Value("float32"),
148
+ "pixel_representation": datasets.Value("int32"),
149
+ "bits_allocated": datasets.Value("int32"),
150
+ "bits_stored": datasets.Value("int32"),
151
+ },
152
  }
153
  )
154
 
155
  return datasets.DatasetInfo(
 
156
  description=_DESCRIPTION,
157
+ features=features,
158
  homepage=_HOMEPAGE,
159
  license=_LICENSE,
160
  citation=_CITATION,
161
  )
162
 
163
    def _split_generators(self, dl_manager):
        """Download metadata/volumes and build TRAIN/TEST split generators.

        Files used per config:
        segmentation: 206 segmentations and the relevant imgs, train_series_meta.csv, train_dicom_tags.parquet
        classification: 4711 all imgs, train.csv, train_series_meta.csv, train_dicom_tags.parquet
        classification-with-mask: 206 segmentations and the relevant imgs, train.csv, train_series_meta.csv, train_dicom_tags.parquet

        NOTE(review): this method also assigns ``self.labels_df`` as a side
        effect; ``_generate_examples`` depends on it being set first.
        """
        # Per-series metadata; assumes columns patient_id, series_id,
        # has_segmentation (used below) — TODO confirm against the CSV.
        series_meta_df = pd.read_csv(
            dl_manager.download_and_extract(
                urllib.parse.urljoin(_URL, "train_series_meta.csv")
            )
        )
        # Remote URLs for each CT volume and its (optional) segmentation mask.
        series_meta_df["img_download_url"] = series_meta_df.apply(
            lambda x: urllib.parse.urljoin(
                _URL,
                f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
            ),
            axis=1,
        )
        series_meta_df["seg_download_url"] = series_meta_df.apply(
            lambda x: urllib.parse.urljoin(
                _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
            ),
            axis=1,
        )
        if (
            self.config.name == "classification-with-mask"
            or self.config.name == "segmentation"
        ):
            # Mask-bearing configs: keep only series that have a segmentation,
            # then download both the image and the mask.
            series_meta_df = series_meta_df.loc[series_meta_df["has_segmentation"] == 1]

            series_meta_df["img_cache_path"] = dl_manager.download(
                series_meta_df["img_download_url"].tolist()
            )
            series_meta_df["seg_cache_path"] = dl_manager.download(
                series_meta_df["seg_download_url"].tolist()
            )

        else:
            # Plain classification: all images, no masks.
            series_meta_df["img_cache_path"] = dl_manager.download(
                series_meta_df["img_download_url"].tolist()
            )
            series_meta_df["seg_cache_path"] = None

        # Pull pixel_representation / bits_allocated / bits_stored out of the
        # DICOM tag dump; the series id is the last dotted component of
        # SeriesInstanceUID.
        dicom_tags_df = datasets.load_dataset(
            "parquet", data_files=urllib.parse.urljoin(_URL, "train_dicom_tags.parquet")
        )["train"].to_pandas()[
            [
                "SeriesInstanceUID",
                "PixelRepresentation",
                "BitsAllocated",
                "BitsStored",
            ]
        ]
        dicom_tags_df["SeriesID"] = dicom_tags_df["SeriesInstanceUID"].apply(
            lambda x: int(x.split(".")[-1])
        )
        dicom_tags_df = dicom_tags_df.drop(labels=["SeriesInstanceUID"], axis=1)
        # One row per series: first() collapses the per-slice tag rows.
        dicom_tags_df = dicom_tags_df.groupby(by=["SeriesID"], as_index=False).first()
        dicom_tags_df = dicom_tags_df.rename(
            columns={
                "SeriesID": "series_id",
                "PixelRepresentation": "pixel_representation",
                "BitsAllocated": "bits_allocated",
                "BitsStored": "bits_stored",
            }
        )
        series_meta_df = pd.merge(
            left=series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
        )

        # Injury labels are only needed for the classification configs.
        self.labels_df = (
            pd.read_csv(
                dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train.csv"))
            )
            if self.config.name != "segmentation"
            else None
        )

        # Reproducible hold-out split driven by the config's test_size /
        # random_state (see RSNA2023AbdominalTraumaDetectionConfig).
        train_series_meta_df, test_series_meta_df = train_test_split(
            series_meta_df, test_size=self.config.test_size, random_state=self.config.random_state, shuffle=True
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": train_series_meta_df[
                        [
                            "series_id",
                            "patient_id",
                            "img_cache_path",
                            "seg_cache_path",
                            "incomplete_organ",
                            "aortic_hu",
                            "pixel_representation",
                            "bits_allocated",
                            "bits_stored",
                        ]
                    ].to_dict("records"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": test_series_meta_df[
                        [
                            "series_id",
                            "patient_id",
                            "img_cache_path",
                            "seg_cache_path",
                            "incomplete_organ",
                            "aortic_hu",
                            "pixel_representation",
                            "bits_allocated",
                            "bits_stored",
                        ]
                    ].to_dict("records"),
                },
            ),
        ]
 
282
  def _generate_examples(
283
+ self,
284
+ filepaths,
285
  ):
286
+ if self.config.name == "segmentation":
287
+ for key, series_meta in enumerate(filepaths):
288
+ yield key, {
289
+ "img_path": series_meta["img_cache_path"],
290
+ "seg_path": series_meta["seg_cache_path"],
291
+ "metadata": {
292
+ "series_id": series_meta["series_id"],
293
+ "patient_id": series_meta["patient_id"],
294
+ "incomplete_organ": series_meta["incomplete_organ"],
295
+ "aortic_hu": series_meta["aortic_hu"],
296
+ "pixel_representation": series_meta["pixel_representation"],
297
+ "bits_allocated": series_meta["bits_allocated"],
298
+ "bits_stored": series_meta["bits_stored"],
299
+ },
300
+ }
301
 
302
+ elif self.config.name == "classification-with-mask":
303
+ for key, series_meta in enumerate(filepaths):
304
+ label_data = (
305
+ self.labels_df.loc[
306
+ self.labels_df["patient_id"] == series_meta["patient_id"]
307
+ ]
308
+ .iloc[0]
309
+ .to_dict()
310
+ )
311
 
312
+ yield key, {
313
+ "img_path": series_meta["img_cache_path"],
314
+ "seg_path": series_meta["seg_cache_path"],
315
+ "bowel": np.argmax(
316
+ [label_data["bowel_healthy"], label_data["bowel_injury"]]
317
+ ),
318
+ "extravasation": np.argmax(
319
+ [
320
+ label_data["extravasation_healthy"],
321
+ label_data["extravasation_injury"],
322
+ ]
323
+ ),
324
+ "kidney": np.argmax(
325
+ [
326
+ label_data["kidney_healthy"],
327
+ label_data["kidney_low"],
328
+ label_data["kidney_high"],
329
+ ]
330
+ ),
331
+ "liver": np.argmax(
332
+ [
333
+ label_data["liver_healthy"],
334
+ label_data["liver_low"],
335
+ label_data["liver_high"],
336
+ ]
337
+ ),
338
+ "spleen": np.argmax(
339
+ [
340
+ label_data["spleen_healthy"],
341
+ label_data["spleen_low"],
342
+ label_data["spleen_high"],
343
+ ]
344
+ ),
345
+ "any_injury": label_data["any_injury"],
346
+ "metadata": {
347
+ "series_id": series_meta["series_id"],
348
+ "patient_id": series_meta["patient_id"],
349
+ "incomplete_organ": series_meta["incomplete_organ"],
350
+ "aortic_hu": series_meta["aortic_hu"],
351
+ "pixel_representation": series_meta["pixel_representation"],
352
+ "bits_allocated": series_meta["bits_allocated"],
353
+ "bits_stored": series_meta["bits_stored"],
354
  },
355
+ }
356
+ else:
357
+ for key, series_meta in enumerate(filepaths):
358
+ label_data = (
359
+ self.labels_df.loc[
360
+ self.labels_df["patient_id"] == series_meta["patient_id"]
361
+ ]
362
+ .iloc[0]
363
+ .to_dict()
364
  )
365
 
366
+ yield key, {
367
+ "img_path": series_meta["img_cache_path"],
368
+ "bowel": np.argmax(
369
+ [label_data["bowel_healthy"], label_data["bowel_injury"]]
370
+ ),
371
+ "extravasation": np.argmax(
372
+ [
373
+ label_data["extravasation_healthy"],
374
+ label_data["extravasation_injury"],
375
+ ]
376
+ ),
377
+ "kidney": np.argmax(
378
+ [
379
+ label_data["kidney_healthy"],
380
+ label_data["kidney_low"],
381
+ label_data["kidney_high"],
382
+ ]
383
+ ),
384
+ "liver": np.argmax(
385
+ [
386
+ label_data["liver_healthy"],
387
+ label_data["liver_low"],
388
+ label_data["liver_high"],
389
+ ]
390
+ ),
391
+ "spleen": np.argmax(
392
+ [
393
+ label_data["spleen_healthy"],
394
+ label_data["spleen_low"],
395
+ label_data["spleen_high"],
396
+ ]
397
+ ),
398
+ "any_injury": label_data["any_injury"],
399
+ "metadata": {
400
+ "series_id": series_meta["series_id"],
401
+ "patient_id": series_meta["patient_id"],
402
+ "incomplete_organ": series_meta["incomplete_organ"],
403
+ "aortic_hu": series_meta["aortic_hu"],
404
+ "pixel_representation": series_meta["pixel_representation"],
405
+ "bits_allocated": series_meta["bits_allocated"],
406
+ "bits_stored": series_meta["bits_stored"],
407
+ },
408
+ }