import io

import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {selfies_and_id},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
4083 sets, each of which includes 2 photos of a person from their documents and
13 selfies. 571 sets of Hispanics and 3512 sets of Caucasians.
Photo documents contain only a photo of the person.
All personal information from the document is hidden.
"""

_NAME = 'selfies_and_id'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SelfiesAndId(datasets.GeneratorBasedBuilder):
    """Sets of ID-document photos and selfies with per-person metadata."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'id_1': datasets.Image(),
                'id_2': datasets.Image(),
                'selfie_1': datasets.Image(),
                'selfie_2': datasets.Image(),
                'selfie_3': datasets.Image(),
                'selfie_4': datasets.Image(),
                'selfie_5': datasets.Image(),
                'selfie_6': datasets.Image(),
                'selfie_7': datasets.Image(),
                'selfie_8': datasets.Image(),
                'selfie_9': datasets.Image(),
                'selfie_10': datasets.Image(),
                'selfie_11': datasets.Image(),
                'selfie_12': datasets.Image(),
                'selfie_13': datasets.Image(),
                'user_id': datasets.Value('string'),
                'set_id': datasets.Value('string'),
                'user_race': datasets.Value('string'),
                'name': datasets.Value('string'),
                'age': datasets.Value('int8'),
                'country': datasets.Value('string'),
                'gender': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the image archive and the CSV annotations, then stream
        # the archive so images can be read without extracting it to disk.
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, images, annotations):
        # Annotations are a semicolon-separated CSV; images arrive as
        # (path, file object) pairs streamed from the tar archive.
        annotations_df = pd.read_csv(annotations, sep=';')
        images_data = pd.DataFrame(columns=['URL', 'Bytes'])
        for idx, (image_path, image) in enumerate(images):
            images_data.loc[idx] = {'URL': image_path, 'Bytes': image.read()}

        # Attach the raw image bytes to each annotation row via the URL column.
        annotations_df = pd.merge(annotations_df,
                                  images_data,
                                  how='left',
                                  on=['URL'])
        # One example per person: collect that person's document photos and
        # selfies keyed by file name (id_1, id_2, selfie_1, ...).
        for idx, worker_id in enumerate(pd.unique(annotations_df['UserId'])):
            annotation = annotations_df.loc[annotations_df['UserId'] ==
                                            worker_id]
            annotation = annotation.sort_values(['FName'])
            # Positional fields from itertuples(): row[5] (FName) becomes the
            # feature key; row[6] and row[10] supply the image path and the
            # merged image bytes.
            data = {
                row[5].lower(): {
                    'path': row[6],
                    'bytes': row[10]
                } for row in annotation.itertuples()
            }

            # Per-person metadata is identical across the set, so read it
            # once from the ID_1 row.
            id_row = annotation.loc[annotation['FName'] == 'ID_1']
            data['user_id'] = worker_id
            data['age'] = id_row['Age'].values[0]
            data['country'] = id_row['Country'].values[0]
            data['gender'] = id_row['Gender'].values[0]
            data['set_id'] = id_row['SetId'].values[0]
            data['user_race'] = id_row['UserRace'].values[0]
            data['name'] = id_row['Name'].values[0]

            yield idx, data
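

# A minimal usage sketch, not part of the original loading script: loading
# this builder through the `datasets` library. The repo id matches _HOMEPAGE;
# newer `datasets` releases may additionally require trust_remote_code=True
# for Hub-hosted scripts. Guarded so it never runs when the library imports
# this module.
if __name__ == "__main__":
    # Downloads the archive and CSV, then builds the single train split.
    ds = datasets.load_dataset(f"TrainingDataPro/{_NAME}", split="train")
    example = ds[0]
    # Image features decode to PIL.Image objects; metadata fields are plain values.
    print(example["user_id"], example["age"], example["country"], example["gender"])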