import os
import random

import datasets
from datasets.tasks import ImageClassification
_HOMEPAGE = "https://github.com/your-github/renovation"
_CITATION = """\
@ONLINE {renovationdata,
author="Your Name",
title="Renovation dataset",
month="January",
year="2023",
url="https://github.com/your-github/renovation"
}
"""
_DESCRIPTION = """\
Renovations is a dataset of images of houses taken in the field using smartphone
cameras. It consists of six classes: Not Applicable, Poor, Fair, Good, Great, and
Excellent renovations. Data was collected by your research lab.
"""
_URLS = {
"Not Applicable": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Not Applicable.zip",
"Poor": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Poor.zip",
"Fair": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Fair.zip",
"Good": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Good.zip",
"Great": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Great.zip",
"Excellent": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/Excellent.zip"
}
_NAMES = ["Not Applicable", "Poor", "Fair", "Good", "Great", "Excellent"]
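# Note: the order of _NAMES defines the integer ids assigned by the ClassLabel feature
# below, i.e. "Not Applicable" -> 0, "Poor" -> 1, ..., "Excellent" -> 5.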
class Renovations(datasets.GeneratorBasedBuilder):
"""Renovations house images dataset."""
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image_file_path": datasets.Value("string"),
"image": datasets.Image(),
"labels": datasets.features.ClassLabel(names=_NAMES),
}
),
supervised_keys=("image", "labels"),
homepage=_HOMEPAGE,
citation=_CITATION,
task_templates=[ImageClassification(image_column="image", label_column="labels")],
)
def _split_generators(self, dl_manager):
data_files = dl_manager.download_and_extract(_URLS)
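        # All three splits reuse the same downloaded-and-extracted archives; the
        # per-split selection happens in _generate_examples via the "split" kwarg.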
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_files": data_files,
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_files": data_files,
"split": "val",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_files": data_files,
"split": "test",
},
),
]
    def _generate_examples(self, data_files, split):
        # Group the extracted image paths by class label.
        data_by_class = {label: [] for label in _NAMES}
        allowed_extensions = {".jpeg", ".jpg"}
        for label, path in data_files.items():
            files = [
                os.path.join(path, f)
                for f in os.listdir(path)
                if os.path.isfile(os.path.join(path, f))
                and os.path.splitext(f)[1].lower() in allowed_extensions  # case-insensitive match
            ]
            data_by_class[label].extend((file, label) for file in files)
        # Shuffle each class independently and split it 85% train / 10% test / 5% val,
        # so every split keeps roughly the same class distribution.
        random.seed(43)  # fixed seed to keep the splits reproducible
        train_data, test_data, val_data = [], [], []
        for label, files_and_labels in data_by_class.items():
            random.shuffle(files_and_labels)
            num_files = len(files_and_labels)
            train_end = int(num_files * 0.85)
            test_end = int(num_files * 0.95)
            train_data.extend(files_and_labels[:train_end])
            test_data.extend(files_and_labels[train_end:test_end])
            val_data.extend(files_and_labels[test_end:])
        # Select the slice that corresponds to the requested split.
        if split == "train":
            data_to_use = train_data
        elif split == "test":
            data_to_use = test_data
        else:  # "val" split
            data_to_use = val_data
        # Yield one example per image; the Image feature loads the file lazily from its path.
        for idx, (file, label) in enumerate(data_to_use):
            yield idx, {
                "image_file_path": file,
                "image": file,
                "labels": label,
            }
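
# A minimal usage sketch, assuming this script is hosted as "rshrott/renovation" on the
# Hugging Face Hub (the repo id is inferred from _URLS above; point load_dataset at a
# local copy of this file otherwise). The guard keeps it from running when the builder
# module is imported by the datasets library.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("rshrott/renovation")
    print(dataset)  # DatasetDict with "train", "validation" and "test" splits
    example = dataset["train"][0]
    # "labels" is stored as an integer id; int2str recovers the class name.
    label_name = dataset["train"].features["labels"].int2str(example["labels"])
    print(example["image_file_path"], label_name)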