jherng committed
Commit e754f80 · 1 Parent(s): 2e717ce

Update rsna-2023-abdominal-trauma-detection.py

rsna-2023-abdominal-trauma-detection.py CHANGED
@@ -156,86 +156,46 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
         # segmentation: 206 segmentations and the relevant imgs, train_series_meta.csv, train_dicom_tags.parquet
         # classification: 4711 all imgs, train.csv, train_series_meta.csv, train_dicom_tags.parquet
         # classification-with-mask: 206 segmentations and the relevant imgs, train.csv, train_series_meta.csv, train_dicom_tags.parquet
-        self.series_meta_df = pd.read_csv(
+        series_meta_df = pd.read_csv(
             dl_manager.download_and_extract(
                 urllib.parse.urljoin(_URL, "train_series_meta.csv")
             )
         )
+        series_meta_df["img_download_url"] = series_meta_df.apply(
+            lambda x: urllib.parse.urljoin(
+                _URL,
+                f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
+            ),
+            axis=1,
+        )
+        series_meta_df["seg_download_url"] = series_meta_df.apply(
+            lambda x: urllib.parse.urljoin(
+                _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
+            ),
+            axis=1,
+        )
         if (
             self.config.name == "classification-with-mask"
             or self.config.name == "segmentation"
         ):
-            self.series_meta_df = self.series_meta_df.loc[
-                self.series_meta_df["has_segmentation"] == 1
-            ]
+            series_meta_df = series_meta_df.loc[series_meta_df["has_segmentation"] == 1]
 
-            train_series_meta_df, test_series_meta_df = train_test_split(
-                self.series_meta_df, test_size=0.1, random_state=42, shuffle=True
+            series_meta_df["img_cache_path"] = dl_manager.download(
+                series_meta_df["img_download_url"].tolist()
             )
-
-            train_img_files = dl_manager.download(
-                train_series_meta_df.apply(
-                    lambda x: urllib.parse.urljoin(
-                        _URL,
-                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
-                    ),
-                    axis=1,
-                ).tolist()
-            )
-            test_img_files = dl_manager.download(
-                test_series_meta_df.apply(
-                    lambda x: urllib.parse.urljoin(
-                        _URL,
-                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
-                    ),
-                    axis=1,
-                ).tolist()
-            )
-            train_seg_files = dl_manager.download(
-                train_series_meta_df.apply(
-                    lambda x: urllib.parse.urljoin(
-                        _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
-                    ),
-                    axis=1,
-                ).tolist()
-            )
-            test_seg_files = dl_manager.download(
-                test_series_meta_df.apply(
-                    lambda x: urllib.parse.urljoin(
-                        _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
-                    ),
-                    axis=1,
-                ).tolist()
-            )
-        else:
-            train_series_meta_df, test_series_meta_df = train_test_split(
-                self.series_meta_df, test_size=0.1, random_state=42, shuffle=True
+            series_meta_df["seg_cache_path"] = dl_manager.download(
+                series_meta_df["seg_download_url"].tolist()
             )
 
-            train_img_files = dl_manager.download(
-                train_series_meta_df.apply(
-                    lambda x: urllib.parse.urljoin(
-                        _URL,
-                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
-                    ),
-                    axis=1,
-                ).tolist()
-            )
-            test_img_files = dl_manager.download(
-                test_series_meta_df.apply(
-                    lambda x: urllib.parse.urljoin(
-                        _URL,
-                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
-                    ),
-                    axis=1,
-                ).tolist()
+        else:
+            series_meta_df["img_cache_path"] = dl_manager.download(
+                series_meta_df["img_download_url"].tolist()
             )
-            train_seg_files = None
-            test_seg_files = None
+            series_meta_df["seg_cache_path"] = None
 
-        dicom_tags_df = datasets.load_dataset("parquet", data_files=urllib.parse.urljoin(_URL, "train_dicom_tags.parquet"))[
-            "train"
-        ].to_pandas()[
+        dicom_tags_df = datasets.load_dataset(
+            "parquet", data_files=urllib.parse.urljoin(_URL, "train_dicom_tags.parquet")
+        )["train"].to_pandas()[
             [
                 "SeriesInstanceUID",
                 "PixelRepresentation",
@@ -256,55 +216,72 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 "BitsStored": "bits_stored",
             }
         )
-        self.series_meta_df = pd.merge(
-            left=self.series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
+        series_meta_df = pd.merge(
+            left=series_meta_df, right=dicom_tags_df, how="inner", on="series_id"
+        )
+
+        self.labels_df = (
+            pd.read_csv(
+                dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train.csv"))
+            )
+            if self.config.name != "segmentation"
+            else None
+        )
+
+        train_series_meta_df, test_series_meta_df = train_test_split(
+            series_meta_df, test_size=0.1, random_state=42, shuffle=True
         )
-
-        self.labels_df = pd.read_csv(
-            dl_manager.download_and_extract(urllib.parse.urljoin(_URL, "train.csv"))
-        ) if self.config.name != "segmentation" else None
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "series_ids": train_series_meta_df["series_id"].tolist(),
-                    "img_files": train_img_files,
-                    "seg_files": train_seg_files,
+                    "filepaths": train_series_meta_df[
+                        [
+                            "series_id",
+                            "patient_id",
+                            "img_cache_path",
+                            "seg_cache_path",
+                            "incomplete_organ",
+                            "aortic_hu",
+                            "pixel_representation",
+                            "bits_allocated",
+                            "bits_stored",
+                        ]
+                    ].to_dict("records"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "series_ids": test_series_meta_df["series_id"].tolist(),
-                    "img_files": test_img_files,
-                    "seg_files": test_seg_files,
+                    "filepaths": test_series_meta_df[
+                        [
+                            "series_id",
+                            "patient_id",
+                            "img_cache_path",
+                            "seg_cache_path",
+                            "incomplete_organ",
+                            "aortic_hu",
+                            "pixel_representation",
+                            "bits_allocated",
+                            "bits_stored",
+                        ]
+                    ].to_dict("records"),
                 },
             ),
         ]
 
     def _generate_examples(
         self,
-        series_ids,
-        img_files,
-        seg_files,
+        filepaths,
     ):
         if self.config.name == "segmentation":
-            for key, (series_id, img_path, seg_path) in enumerate(
-                zip(series_ids, img_files, seg_files)
-            ):
-                series_meta = (
-                    self.series_meta_df.loc[
-                        self.series_meta_df["series_id"] == series_id
-                    ]
-                    .iloc[0]
-                    .to_dict()
-                )
+            for key, series_meta in enumerate(filepaths):
                 yield key, {
-                    "img_path": img_path,
-                    "seg_path": seg_path,
+                    "img_path": series_meta["img_cache_path"],
+                    "seg_path": series_meta["seg_cache_path"],
                     "metadata": {
-                        "series_id": series_id,
+                        "series_id": series_meta["series_id"],
                         "patient_id": series_meta["patient_id"],
                         "incomplete_organ": series_meta["incomplete_organ"],
                         "aortic_hu": series_meta["aortic_hu"],
@@ -315,26 +292,18 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 }
 
         elif self.config.name == "classification-with-mask":
-            for key, (series_id, img_path, seg_path) in enumerate(
-                zip(series_ids, img_files, seg_files)
-            ):
-                series_meta = (
-                    self.series_meta_df.loc[
-                        self.series_meta_df["series_id"] == series_id
-                    ]
-                    .iloc[0]
-                    .to_dict()
-                )
-                patient_id = series_meta["patient_id"]
+            for key, series_meta in enumerate(filepaths):
                 label_data = (
-                    self.labels_df.loc[self.labels_df["patient_id"] == patient_id]
+                    self.labels_df.loc[
+                        self.labels_df["patient_id"] == series_meta["patient_id"]
+                    ]
                     .iloc[0]
                     .to_dict()
                 )
 
                 yield key, {
-                    "img_path": img_path,
-                    "seg_path": seg_path,
+                    "img_path": series_meta["img_cache_path"],
+                    "seg_path": series_meta["seg_cache_path"],
                     "bowel": np.argmax(
                         [label_data["bowel_healthy"], label_data["bowel_injury"]]
                     ),
@@ -367,7 +336,7 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                     ),
                    "any_injury": label_data["any_injury"],
                     "metadata": {
-                        "series_id": series_id,
+                        "series_id": series_meta["series_id"],
                         "patient_id": series_meta["patient_id"],
                         "incomplete_organ": series_meta["incomplete_organ"],
                         "aortic_hu": series_meta["aortic_hu"],
@@ -377,23 +346,17 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                     },
                 }
         else:
-            for key, (series_id, img_path) in enumerate(zip(series_ids, img_files)):
-                series_meta = (
-                    self.series_meta_df.loc[
-                        self.series_meta_df["series_id"] == series_id
-                    ]
-                    .iloc[0]
-                    .to_dict()
-                )
-                patient_id = series_meta["patient_id"]
+            for key, series_meta in enumerate(filepaths):
                 label_data = (
-                    self.labels_df.loc[self.labels_df["patient_id"] == patient_id]
+                    self.labels_df.loc[
+                        self.labels_df["patient_id"] == series_meta["patient_id"]
+                    ]
                     .iloc[0]
                     .to_dict()
                 )
 
                 yield key, {
-                    "img_path": img_path,
+                    "img_path": series_meta["img_cache_path"],
                     "bowel": np.argmax(
                         [label_data["bowel_healthy"], label_data["bowel_injury"]]
                     ),
@@ -426,7 +389,7 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                     ),
                     "any_injury": label_data["any_injury"],
                     "metadata": {
-                        "series_id": series_id,
+                        "series_id": series_meta["series_id"],
                         "patient_id": series_meta["patient_id"],
                         "incomplete_organ": series_meta["incomplete_organ"],
                         "aortic_hu": series_meta["aortic_hu"],