import os

import datasets
import pandas as pd
from PIL import Image


class ArtelingoBuilderConfig(datasets.BuilderConfig):
    def __init__(self, name, splits, **kwargs):
        super().__init__(name, **kwargs)
        self.splits = splits


# Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@inproceedings{mohamed2022artelingo,
  title={ArtELingo: A Million Emotion Annotations of WikiArt with Emphasis on Diversity over Language and Culture},
  author={Mohamed, Youssef and Abdelfattah, Mohamed and Alhuwaider, Shyma and Li, Feifan and Zhang, Xiangliang and Church, Kenneth and Elhoseiny, Mohamed},
  booktitle={Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
  pages={8770--8785},
  year={2022}
}
"""

# Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
ArtELingo is a benchmark and dataset comprising a collection of 80,000 artworks from WikiArt with 1.2 million annotations in English, Arabic, and Chinese.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://www.artelingo.org/"

# Add the licence for the dataset here if you can find it
_LICENSE = "Terms of Use: Before we are able to offer you access to the database, \
please agree to the following terms of use. After approval, you (the 'Researcher') \
receive permission to use the ArtELingo database (the 'Database') at King Abdullah \
University of Science and Technology (KAUST). In exchange for being able to join the \
ArtELingo community and receive such permission, Researcher hereby agrees to the \
following terms and conditions: [1.] The Researcher shall use the Database only for \
non-commercial research and educational purposes. [2.] The Universities make no \
representations or warranties regarding the Database, including but not limited to \
warranties of non-infringement or fitness for a particular purpose. [3.] Researcher \
accepts full responsibility for his or her use of the Database and shall defend and \
indemnify the Universities, including their employees, Trustees, officers and agents, \
against any and all claims arising from Researcher's use of the Database, and \
Researcher's use of any copies of copyrighted 2D artworks originally uploaded to \
http://www.wikiart.org that the Researcher may use in connection with the Database. \
[4.] Researcher may provide research associates and colleagues with access to the \
Database provided that they first agree to be bound by these terms and conditions. \
[5.] The Universities reserve the right to terminate Researcher's access to the Database \
at any time. [6.] If Researcher is employed by a for-profit, commercial entity, \
Researcher's employer shall also be bound by these terms and conditions, and Researcher \
hereby represents that he or she is fully authorized to enter into this agreement on \
behalf of such employer. [7.] The international copyright laws shall apply to all \
disputes under this agreement."

# Add links to the official dataset URLs here
# The HuggingFace datasets library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method)
# This script can also work with local (downloaded) files.
_URLs = {
    'val': 'https://artelingo.s3.amazonaws.com/val.zip',
    'test': 'https://artelingo.s3.amazonaws.com/test.zip',
    'train': 'https://artelingo.s3.amazonaws.com/train.zip',
    'wecia-emo_dev': 'https://artelingo.s3.amazonaws.com/wecia_emo_dev.zip',
    'wecia-cap_dev': 'https://artelingo.s3.amazonaws.com/wecia_cap_dev.zip',
    'wecia-emo_hidden': 'https://artelingo.s3.amazonaws.com/wecia_emo_hidden.zip',
    'wecia-cap_hidden': 'https://artelingo.s3.amazonaws.com/wecia_cap_hidden.zip',
}

# _URL_ANN = "https://artelingo.s3.amazonaws.com/artelingo_release_lite.csv"

_EMOTIONS = ['contentment', 'awe', 'amusement', 'excitement', 'sadness',
             'fear', 'anger', 'disgust', 'something else']


# The name of the dataset class usually matches the script name, with CamelCase instead of snake_case
class Artelingo(datasets.GeneratorBasedBuilder):
    """A dataset script for working with the ArtELingo dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        ArtelingoBuilderConfig(name='artelingo', splits=['train', 'val', 'test'],
                               version=VERSION,
                               description="The full ArtELingo dataset"),
        ArtelingoBuilderConfig(name='dev', splits=['val', 'test'],
                               version=VERSION,
                               description="The Test and Val subsets of ArtELingo"),
        ArtelingoBuilderConfig(name='wecia-emo', splits=['dev'],
                               version=VERSION,
                               description="The Dev set of the WECIA Emotion Prediction challenge"),
        ArtelingoBuilderConfig(name='wecia-cap', splits=['dev'],
                               version=VERSION,
                               description="The Dev set of the WECIA Affective Caption Generation challenge"),
    ]

    DEFAULT_CONFIG_NAME = "artelingo"

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains information and typing for the dataset
        feature_dict = {
            "uid": datasets.Value("int32"),
            "image": datasets.Image(),
            "art_style": datasets.Value("string"),
            "painting": datasets.Value("string"),
            # "emotion": datasets.ClassLabel(names=_EMOTIONS),
            "emotion": datasets.Value("string"),
            "language": datasets.Value("string"),
            "text": datasets.Value("string"),
        }
        features = datasets.Features(feature_dict)

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify it here. It will be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method is tasked with downloading/extracting the data and defining the splits,
        # depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration
        # selected by the user is in self.config.name
        # If the user supplies `data_dir`, it is expected to map each split name to the
        # directory of its already-extracted archive.
        data_dir = self.config.data_dir
        if data_dir is None:
            # Download and extract one archive per split; the WECIA challenge archives are
            # keyed as '<config name>_<split>' in _URLs.
            data_dir = {}
            prefix = self.config.name + '_' if 'wecia' in self.config.name else ''
            for split in self.config.splits:
                data_dir[split] = dl_manager.download_and_extract(_URLs[prefix + split])
        # data_dir = dl_manager.download_and_extract(_URLs)

        splits = []
        for split in self.config.splits:
            dataset = datasets.SplitGenerator(
                name=split,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "metadata": os.path.join(data_dir[split], split, "metadata.csv"),
                    "image_dir": os.path.join(data_dir[split], split),
                },
            )
            splits.append(dataset)
        return splits

    def _generate_examples(
        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
        self, metadata, image_dir
    ):
        """Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
        name = self.config.name
        df = pd.read_csv(metadata)
        uids = range(len(df))
        if name == 'wecia-emo':
            # Emotion prediction challenge: the caption is given, the emotion label is hidden
            for uid, entry in zip(uids, df.itertuples()):
                result = {
                    "uid": entry.uid,
                    "image": Image.open(os.path.join(image_dir, entry.file_name)),
                    "art_style": entry.art_style,
                    "painting": entry.painting,
                    "text": entry.text,
                    "emotion": None,
                    "language": None,
                }
                yield uid, result
        elif name == 'wecia-cap':
            # Affective caption generation challenge: the emotion and language are given, the caption is hidden
            for uid, entry in zip(uids, df.itertuples()):
                result = {
                    "uid": entry.uid,
                    "image": Image.open(os.path.join(image_dir, entry.file_name)),
                    "art_style": entry.art_style,
                    "painting": entry.painting,
                    "emotion": entry.emotion,
                    "language": entry.language,
                    "text": None,
                }
                yield uid, result
        else:
            # Full 'artelingo' and 'dev' configurations: all annotation fields are available
            for uid, entry in zip(uids, df.itertuples()):
                result = {
                    "uid": uid,
                    "image": Image.open(os.path.join(image_dir, entry.file_name)),
                    "art_style": entry.art_style,
                    "painting": entry.painting,
                    "emotion": entry.emotion,
                    "language": entry.language,
                    "text": entry.text,
                }
                yield uid, result
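

# Usage sketch, not part of the original script: it shows how the builder above could be
# exercised through `datasets.load_dataset`, assuming this file is saved locally as
# `artelingo.py`. The config names ("artelingo", "dev", "wecia-emo", "wecia-cap") come from
# BUILDER_CONFIGS; downloading the archives requires network access and acceptance of the
# terms of use above, and recent versions of `datasets` may additionally require passing
# trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # The "dev" configuration covers only the val and test splits, so it downloads less
    # data than the full "artelingo" configuration.
    ds = load_dataset("artelingo.py", name="dev")
    print(ds)

    # Each example carries the image plus its emotion label, language, and caption text.
    example = ds["val"][0]
    print(example["emotion"], example["language"], example["text"])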