# NOTE: removed web-viewer scrape artifacts (file-size banner, per-line commit
# hashes, and gutter line numbers) that were accidentally captured above the
# actual source and would be a syntax error if this script were executed.
import os
import glob
import random

import datasets
from datasets.tasks import ImageClassification
from datasets import load_dataset
import os
from huggingface_hub import login
_HOMEPAGE = "https://github.com/your-github/renovation"

_CITATION = """\
@ONLINE {renovationdata,
    author="Your Name",
    title="Renovation dataset",
    month="January",
    year="2023",
    url="https://github.com/your-github/renovation"
}
"""

_DESCRIPTION = """\
Renovations is a dataset of images of houses taken in the field using smartphone
cameras. It consists of 7 classes: Not Applicable, Poor, Fair, Good, and Great  renovations.
Data was collected by the your research lab.
"""

_URLS = {
    "Not Applicable": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Not Applicable.zip",
    "Poor": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Poor.zip",
    "Fair": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Fair.zip",
    "Good": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Good.zip",
    "Great": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Great.zip",
    "Excellent": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Excellent.zip"
}

_NAMES = ["Not Applicable", "Poor", "Fair", "Good", "Great", "Excellent"]
class Renovations(datasets.GeneratorBasedBuilder):
    """Renovations house images dataset (six renovation-quality classes)."""

    def _info(self):
        """Return dataset metadata: features, supervised keys, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        """Download every class archive once; splits are carved out later.

        All three SplitGenerators share the same extracted paths —
        _generate_examples performs the per-class train/test/val partition.
        """
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_files, split):
        """Yield (index, example) pairs for the requested split.

        Images are bucketed per class, deterministically shuffled, and
        partitioned 85% train / 10% test / 5% val within each class so the
        class balance is preserved across splits.

        Args:
            data_files: mapping of class label -> extracted directory path
                (as produced by download_and_extract in _split_generators).
            split: one of "train", "test", or "val".
        """
        # Collect (file, label) pairs per class.
        data_by_class = {label: [] for label in _NAMES}
        allowed_extensions = {'.jpeg', '.jpg'}
        for label, path in data_files.items():
            # Sort the directory listing: os.listdir order is
            # platform-dependent, so without sorting the fixed-seed shuffle
            # below would NOT give reproducible splits across machines.
            # .lower() also accepts uppercase extensions (.JPG, .JPEG).
            files = [
                os.path.join(path, f)
                for f in sorted(os.listdir(path))
                if os.path.isfile(os.path.join(path, f))
                and os.path.splitext(f)[1].lower() in allowed_extensions
            ]
            data_by_class[label].extend((file, label) for file in files)

        # Shuffle and split data for each class.
        random.seed(43)  # ensure reproducibility
        train_data, test_data, val_data = [], [], []
        for label, files_and_labels in data_by_class.items():
            random.shuffle(files_and_labels)
            num_files = len(files_and_labels)
            train_end = int(num_files * 0.85)
            test_end = int(num_files * 0.95)
            train_data.extend(files_and_labels[:train_end])
            test_data.extend(files_and_labels[train_end:test_end])
            val_data.extend(files_and_labels[test_end:])

        # Select the appropriate split.
        if split == "train":
            data_to_use = train_data
        elif split == "test":
            data_to_use = test_data
        else:  # "val" split
            data_to_use = val_data

        # Yield examples; datasets.Image() decodes the image from the path.
        for idx, (file, label) in enumerate(data_to_use):
            yield idx, {
                "image_file_path": file,
                "image": file,
                "labels": label,
            }