Update rsna-2023-abdominal-trauma-detection.py
rsna-2023-abdominal-trauma-detection.py
CHANGED
@@ -2,6 +2,7 @@ import urllib
 import numpy as np
 import pandas as pd
 import datasets
+from sklearn.model_selection import train_test_split


 _CITATION = """\
@@ -173,16 +174,39 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
             or self.config.name == "segmentation"
         ):
             series_meta_df = series_meta_df.loc[series_meta_df["has_segmentation"] == 1]
-
-
+
+            train_series_meta_df, test_series_meta_df = train_test_split(
+                series_meta_df, test_size=0.1, random_state=42, shuffle=True
+            )
+
+            train_img_files = dl_manager.download(
+                train_series_meta_df.apply(
+                    lambda x: urllib.parse.urljoin(
+                        _URL,
+                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
+                    ),
+                    axis=1,
+                ).tolist()
+            )
+            test_img_files = dl_manager.download(
+                test_series_meta_df.apply(
                     lambda x: urllib.parse.urljoin(
-                        _URL,
+                        _URL,
+                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
+                    ),
+                    axis=1,
+                ).tolist()
+            )
+            train_seg_files = dl_manager.download(
+                train_series_meta_df.apply(
+                    lambda x: urllib.parse.urljoin(
+                        _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
                     ),
                     axis=1,
                 ).tolist()
             )
-
-
+            test_seg_files = dl_manager.download(
+                train_series_meta_df.apply(
                     lambda x: urllib.parse.urljoin(
                         _URL, f"segmentations/{int(x['series_id'])}.nii.gz"
                     ),
@@ -190,26 +214,52 @@ class RSNA2023AbdominalTraumaDetectionSegmentation(datasets.GeneratorBasedBuilde
                 ).tolist()
             )
         else:
-
-            series_meta_df.
+            train_series_meta_df, test_series_meta_df = train_test_split(
+                series_meta_df, test_size=0.1, random_state=42, shuffle=True
+            )
+
+            train_img_files = dl_manager.download(
+                train_series_meta_df.apply(
+                    lambda x: urllib.parse.urljoin(
+                        _URL,
+                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
+                    ),
+                    axis=1,
+                ).tolist()
+            )
+            test_img_files = dl_manager.download(
+                test_series_meta_df.apply(
                     lambda x: urllib.parse.urljoin(
-                        _URL,
+                        _URL,
+                        f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz",
                     ),
                     axis=1,
                 ).tolist()
             )
-
+            train_seg_files = None
+            test_seg_files = None

         return [
             datasets.SplitGenerator(
-                name=datasets.Split.
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "series_ids": train_series_meta_df["series_id"].tolist(),
+                    "dicom_tags_file": dicom_tags_file,
+                    "series_meta_file": series_meta_file,
+                    "labels_file": labels_file,
+                    "img_files": train_img_files,
+                    "seg_files": train_seg_files,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
                 gen_kwargs={
-                    "series_ids":
+                    "series_ids": test_series_meta_df["series_id"].tolist(),
                     "dicom_tags_file": dicom_tags_file,
                     "series_meta_file": series_meta_file,
                     "labels_file": labels_file,
-                    "img_files":
-                    "seg_files":
+                    "img_files": test_img_files,
+                    "seg_files": test_seg_files,
                 },
             ),
         ]
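
For reference, a minimal sketch of the split behaviour the hunks above introduce, using a toy DataFrame in place of series_meta_df: with test_size=0.1, random_state=42 and shuffle=True, scikit-learn's train_test_split yields the same 90/10 partition of the series on every run.

# Toy stand-in for series_meta_df; the real frame is read from the series
# metadata file inside the loading script.
import pandas as pd
from sklearn.model_selection import train_test_split

series_meta_df = pd.DataFrame(
    {"patient_id": range(100), "series_id": range(1000, 1100)}
)

# Same call as in the diff: a deterministic 90/10 split fixed by random_state.
train_series_meta_df, test_series_meta_df = train_test_split(
    series_meta_df, test_size=0.1, random_state=42, shuffle=True
)
print(len(train_series_meta_df), len(test_series_meta_df))  # 90 10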
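The download calls build one URL per row of the split DataFrames. Below is a sketch of that pattern in isolation; the base URL and the patient/series ids are placeholders, since the script's _URL constant is defined outside this diff. The resulting list is what dl_manager.download receives, and it returns the corresponding local file paths in the same order.

import urllib.parse
import pandas as pd

# Placeholder base URL (assumption); note the trailing slash, which urljoin
# needs in order to append the relative path.
_URL = "https://example.org/rsna-2023/"

# Made-up ids purely for illustration.
df = pd.DataFrame({"patient_id": [101, 102], "series_id": [5001, 5002]})

urls = df.apply(
    lambda x: urllib.parse.urljoin(
        _URL, f"train_images/{int(x['patient_id'])}/{int(x['series_id'])}.nii.gz"
    ),
    axis=1,
).tolist()
# urls == ['https://example.org/rsna-2023/train_images/101/5001.nii.gz',
#          'https://example.org/rsna-2023/train_images/102/5002.nii.gz']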
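With the TRAIN and TEST generators in place, the two splits would typically be consumed as shown below; the repository id is hypothetical, and the config name follows the "segmentation" config referenced in the hunks above.

import datasets

# Hypothetical repo id; substitute wherever this loading script is hosted.
train_ds = datasets.load_dataset(
    "<namespace>/rsna-2023-abdominal-trauma-detection", "segmentation", split="train"
)
test_ds = datasets.load_dataset(
    "<namespace>/rsna-2023-abdominal-trauma-detection", "segmentation", split="test"
)
print(train_ds, test_ds)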
|