"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from pathlib import Path |
|
|
|
import datasets |
|
from datasets.tasks import ImageClassification |
|
import numpy as np |
|
|
|
_CITATION = """\
@article{FeiFei2004LearningGV,
    title={Learning Generative Visual Models from Few Training Examples: An Incremental Bayesian Approach Tested on 101 Object Categories},
    author={Li Fei-Fei and Rob Fergus and Pietro Perona},
    journal={Computer Vision and Pattern Recognition Workshop},
    year={2004},
}
"""

_DESCRIPTION = """\
Pictures of objects belonging to 101 categories.
About 40 to 800 images per category.
Most categories have about 50 images.
Collected in September 2003 by Fei-Fei Li, Marco Andreetto, and Marc'Aurelio Ranzato.
The size of each image is roughly 300 x 200 pixels.
"""

_HOMEPAGE = "https://data.caltech.edu/records/20086"

_LICENSE = "CC BY 4.0"

# Relative path to the archive containing the `101_ObjectCategories` image folders.
_DATA_URL = "brand_new_data/caltech-101/101_ObjectCategories.tar.gz"

_NAMES = [
    "accordion",
    "airplanes",
    "anchor",
    "ant",
    "background_google",
    "barrel",
    "bass",
    "beaver",
    "binocular",
    "bonsai",
    "brain",
    "brontosaurus",
    "buddha",
    "butterfly",
    "camera",
    "cannon",
    "car_side",
    "ceiling_fan",
    "cellphone",
    "chair",
    "chandelier",
    "cougar_body",
    "cougar_face",
    "crab",
    "crayfish",
    "crocodile",
    "crocodile_head",
    "cup",
    "dalmatian",
    "dollar_bill",
    "dolphin",
    "dragonfly",
    "electric_guitar",
    "elephant",
    "emu",
    "euphonium",
    "ewer",
    "faces",
    "faces_easy",
    "ferry",
    "flamingo",
    "flamingo_head",
    "garfield",
    "gerenuk",
    "gramophone",
    "grand_piano",
    "hawksbill",
    "headphone",
    "hedgehog",
    "helicopter",
    "ibis",
    "inline_skate",
    "joshua_tree",
    "kangaroo",
    "ketch",
    "lamp",
    "laptop",
    "leopards",
    "llama",
    "lobster",
    "lotus",
    "mandolin",
    "mayfly",
    "menorah",
    "metronome",
    "minaret",
    "motorbikes",
    "nautilus",
    "octopus",
    "okapi",
    "pagoda",
    "panda",
    "pigeon",
    "pizza",
    "platypus",
    "pyramid",
    "revolver",
    "rhino",
    "rooster",
    "saxophone",
    "schooner",
    "scissors",
    "scorpion",
    "sea_horse",
    "snoopy",
    "soccer_ball",
    "stapler",
    "starfish",
    "stegosaurus",
    "stop_sign",
    "strawberry",
    "sunflower",
    "tick",
    "trilobite",
    "umbrella",
    "watch",
    "water_lilly",
    "wheelchair",
    "wild_cat",
    "windsor_chair",
    "wrench",
    "yin_yang",
]

_TRAIN_POINTS_PER_CLASS = 30


class Caltech101(datasets.GeneratorBasedBuilder):
    """Caltech 101 dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_DATA_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        is_train_split = split == "train"
        data_dir = Path(filepath) / "101_ObjectCategories"

        # Seed NumPy's global RNG so the per-class train/test split is reproducible,
        # then restore the previous state once generation is done.
        numpy_original_state = np.random.get_state()
        np.random.seed(1234)

        for class_dir in data_dir.iterdir():
            # Sort for a stable ordering (iterdir order is filesystem-dependent) and
            # check that every file in the class directory is a JPEG image.
            all_paths = sorted(class_dir.iterdir())
            fnames = [image_path for image_path in all_paths if image_path.name.endswith(".jpg")]
            non_images = [image_path for image_path in all_paths if not image_path.name.endswith(".jpg")]
            assert not non_images, f"Unexpected non-JPEG files in {class_dir.name}: {non_images}"

            if _TRAIN_POINTS_PER_CLASS > len(fnames):
                raise ValueError(
                    f"Class {class_dir.name} has only {len(fnames)} images, "
                    f"fewer than the {_TRAIN_POINTS_PER_CLASS} needed for the train split"
                )
            # Draw a fixed number of training images per class; the remainder form the test split.
            train_fnames = np.random.choice(fnames, _TRAIN_POINTS_PER_CLASS, replace=False)
            test_fnames = set(fnames).difference(train_fnames)
            fnames_to_emit = train_fnames if is_train_split else test_fnames

            for image_file in fnames_to_emit:
                record = {
                    "image": str(image_file),
                    "label": class_dir.name.lower(),
                }
                yield f"{class_dir.name.lower()}/{image_file}", record

        np.random.set_state(numpy_original_state)
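

# A minimal usage sketch, not part of the loading script itself. It assumes this file is
# saved locally as `caltech101.py` and that the archive referenced by _DATA_URL is
# reachable relative to the script; under those assumptions the dataset can be loaded
# through the standard `datasets.load_dataset` API:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("./caltech101.py")
#   example = ds["train"][0]
#   print(ds["train"].features["label"].int2str(example["label"]))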