Yuhan Hou committed
Commit 02ffd0b
Parent: ba561f7
Files changed (1)
  1. FracAtlas_dataset.py +0 -257
FracAtlas_dataset.py DELETED
@@ -1,257 +0,0 @@
- #!/usr/bin/env python3
- # -*- coding: utf-8 -*-
- """
- Created on Sun Feb 18 23:13:51 2024
-
- @author: houyuhan
- """
-
-
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- FracAtlas Dataset Loader
-
- This script provides a Hugging Face `datasets` loader for the FracAtlas dataset, a comprehensive collection
- of musculoskeletal radiographs aimed at advancing research in fracture classification, localization, and segmentation.
- The dataset includes high-quality X-Ray images accompanied by detailed annotations in COCO JSON format for segmentation
- and bounding box information, as well as PASCAL VOC XML files for additional localization data.
-
- The loader handles downloading and preparing the dataset, making it readily available for machine learning models and analysis
- tasks in medical imaging, especially focusing on the detection and understanding of bone fractures.
-
- License: CC-BY 4.0
- """
-
-
- import csv
- import json
- import os
- from typing import List
- import datasets
- import logging
- import pandas as pd
- from sklearn.model_selection import train_test_split
- import shutil
- import xml.etree.ElementTree as ET
- from datasets import load_dataset
-
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:yh0701/FracAtlas_dataset,
- title = {FracAtlas: A Dataset for Fracture Classification, Localization and Segmentation of Musculoskeletal Radiographs},
- author={Abedeen, Iftekharul; Rahman, Md. Ashiqur; Zohra Prottyasha, Fatema; Ahmed, Tasnim; Mohmud Chowdhury, Tareque; Shatabda, Swakkhar},
- year={2023}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- The "FracAtlas" dataset is a collection of musculoskeletal radiographs for fracture classification, localization, and segmentation.
- It includes 4,083 X-Ray images with annotations in multiple formats. The annotations include bounding boxes, segmentations, etc.
- The dataset is intended for use in deep learning tasks in medical imaging, specifically targeting the understanding of bone fractures.
- It is freely available under a CC-BY 4.0 license.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "https://figshare.com/articles/dataset/The_dataset/22363012"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = "The dataset is licensed under a CC-BY 4.0 license."
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URL = "https://figshare.com/ndownloader/files/43283628"
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class FracAtlasDataset(datasets.GeneratorBasedBuilder):
-     """FracAtlas: musculoskeletal radiographs for fracture classification, localization, and segmentation."""
-
-     _URL = _URL
-     VERSION = datasets.Version("1.1.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "image_id": datasets.Value("string"),
-                     "image": datasets.Image(),
-                     "hand": datasets.ClassLabel(num_classes=2, names=['no_hand_fracture', 'hand_fracture']),
-                     "leg": datasets.ClassLabel(num_classes=2, names=['no_leg_fracture', 'leg_fracture']),
-                     "hip": datasets.ClassLabel(num_classes=2, names=['no_hip_fracture', 'hip_fracture']),
-                     "shoulder": datasets.ClassLabel(num_classes=2, names=['no_shoulder_fracture', 'shoulder_fracture']),
-                     "mixed": datasets.ClassLabel(num_classes=2, names=['not_mixed', 'mixed']),
-                     "hardware": datasets.ClassLabel(num_classes=2, names=['no_hardware', 'hardware']),
-                     "multiscan": datasets.ClassLabel(num_classes=2, names=['not_multiscan', 'multiscan']),
-                     "fractured": datasets.ClassLabel(num_classes=2, names=['not_fractured', 'fractured']),
-                     "fracture_count": datasets.Value("int32"),
-                     "frontal": datasets.ClassLabel(num_classes=2, names=['not_frontal', 'frontal']),
-                     "lateral": datasets.ClassLabel(num_classes=2, names=['not_lateral', 'lateral']),
-                     "oblique": datasets.ClassLabel(num_classes=2, names=['not_oblique', 'oblique']),
-                     "localization_metadata": datasets.Features({
-                         "width": datasets.Value("int32"),
-                         "height": datasets.Value("int32"),
-                         "depth": datasets.Value("int32"),
-                     }),
-                     "segmentation_metadata": datasets.Features({
-                         "segmentation": datasets.Sequence(datasets.Sequence(datasets.Value("float"))),
-                         "bbox": datasets.Sequence(datasets.Value("float")),
-                         "area": datasets.Value("float")
-                     })
-                 }
-             ),
-             # No default supervised_keys, since each example bundles several
-             # labels and metadata rather than a single (input, label) pair.
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             citation=_CITATION
-         )
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-         url_to_download = self._URL
-         downloaded_files = dl_manager.download_and_extract(url_to_download)
-
-         # Adjusted path to include 'FracAtlas' directory
-         base_path = os.path.join(downloaded_files, 'FracAtlas')
-
-         # Split the dataset into train/validation/test with a 0.7/0.15/0.15 ratio
-         df = pd.read_csv(os.path.join(base_path, 'dataset.csv'))
-         train_df, test_df = train_test_split(df, test_size=0.3)
-         validation_df, test_df = train_test_split(test_df, test_size=0.5)
-
-         # Store the splits back as CSV files
-         train_df.to_csv(os.path.join(base_path, 'train_dataset.csv'), index=False)
-         validation_df.to_csv(os.path.join(base_path, 'validation_dataset.csv'), index=False)
-         test_df.to_csv(os.path.join(base_path, 'test_dataset.csv'), index=False)
-
-         annotations_path = os.path.join(base_path, 'Annotations/COCO JSON/COCO_fracture_masks.json')
-         images_path = os.path.join(base_path, 'images')
-         localization_path = os.path.join(base_path, 'Annotations/PASCAL VOC')
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"dataset_csv_path": os.path.join(base_path, 'train_dataset.csv'),
-                 "images_path": images_path,
-                 "annotations_path": annotations_path,
-                 "localization_path": localization_path
-             }),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"dataset_csv_path": os.path.join(base_path, 'validation_dataset.csv'),
-                 "images_path": images_path,
-                 "annotations_path": annotations_path,
-                 "localization_path": localization_path
-             }),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"dataset_csv_path": os.path.join(base_path, 'test_dataset.csv'),
-                 "images_path": images_path,
-                 "annotations_path": annotations_path,
-                 "localization_path": localization_path
-             })
-         ]
-
-     def _generate_examples(self, annotations_path, images_path, dataset_csv_path, localization_path):
-         logging.info("Generating examples from = %s", dataset_csv_path)
-         split_df = pd.read_csv(dataset_csv_path)  # Load the DataFrame for the current split
-
-         # Function to convert a numeric ID to the formatted image file name
-         def format_image_id(numeric_id):
-             return f"IMG{numeric_id:07d}.jpg"  # Adjust format as needed
-
-         # Function to extract information from XML files
-         def parse_xml(xml_path):
-             tree = ET.parse(xml_path)
-             root = tree.getroot()
-
-             # Extract the necessary information
-             width = int(root.find("./size/width").text)
-             height = int(root.find("./size/height").text)
-             depth = int(root.find("./size/depth").text)
-             segmented = int(root.find("./segmented").text)
-             return width, height, depth, segmented
-
-         # Load annotations
-         with open(annotations_path) as file:
-             annotations_json = json.load(file)
-
-         for item in annotations_json['annotations']:
-             item['image_id'] = format_image_id(item['image_id'])
-
-         annotations = {item['image_id']: item for item in annotations_json['annotations']}
-
-
-         # Iterate through each row in the split DataFrame
-         for _, row in split_df.iterrows():
-             image_id = row['image_id']
-             # Determine the folder based on the 'fractured' column
-             folder = 'Fractured' if row['fractured'] == 1 else 'Non_fractured'
-
-             # Check if the image_id exists in annotations
-             annotation = annotations.get(image_id)
-             image_path = os.path.join(images_path, folder, image_id)
-
-             # Initialize variables
-             segmentation, bbox, area = None, None, None
-             segmentation_metadata = None
-
-             if annotation:
-                 segmentation = annotation.get('segmentation')
-                 bbox = annotation.get('bbox')
-                 area = annotation.get('area')
-
-                 segmentation_metadata = {
-                     'segmentation': segmentation,
-                     'bbox': bbox,
-                     'area': area
-                 }
-             else:
-                 segmentation_metadata = None  # Default if not present
-
-             xml_file_name = f"{image_id.split('.')[0]}.xml"
-             xml_path = os.path.join(localization_path, xml_file_name)
-
-             # Parse the XML file
-             width, height, depth, _ = parse_xml(xml_path)
-
-             localization_metadata = {
-                 'width': width,
-                 'height': height,
-                 'depth': depth
-             }
-
-
-             # Construct example data
-             example_data = {
-                 "image_id": row['image_id'],
-                 "image": image_path,
-                 "hand": row["hand"],
-                 "leg": row["leg"],
-                 "hip": row["hip"],
-                 "shoulder": row["shoulder"],
-                 "mixed": row["mixed"],
-                 "hardware": row["hardware"],
-                 "multiscan": row["multiscan"],
-                 "fractured": row["fractured"],
-                 "fracture_count": row["fracture_count"],
-                 "frontal": row["frontal"],
-                 "lateral": row["lateral"],
-                 "oblique": row["oblique"],
-                 "localization_metadata": localization_metadata,
-                 "segmentation_metadata": segmentation_metadata
-             }
-             yield image_id, example_data
-
-
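Since this commit removes the loader script outright, the snippet below is a minimal usage sketch (not part of the commit) recording how a script-based dataset like this is typically loaded. It assumes the script lived under the repo id `yh0701/FracAtlas_dataset` (taken from the `_CITATION` block) and a `datasets` release that still executes dataset scripts; recent releases require `trust_remote_code=True` for that.

# Usage sketch (assumed, not from the commit): load the script-based dataset.
from datasets import load_dataset

# The repo id comes from the _CITATION block; trust_remote_code opts in to
# running the repository's loader script.
dataset = load_dataset("yh0701/FracAtlas_dataset", trust_remote_code=True)

# The loader defines train/validation/test splits (0.7/0.15/0.15).
print(dataset)

# Each example pairs an X-Ray image with classification labels and metadata.
example = dataset["train"][0]
print(example["image_id"], example["fractured"], example["localization_metadata"])

One caveat visible in the deleted code: `_split_generators` calls `train_test_split` without a fixed `random_state`, so train/validation/test membership is re-drawn each time the dataset is prepared and can differ across machines and cache rebuilds.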