Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
utcd / utcd.py
StefanH's picture
Update: dset default config
d66144f
raw
history blame
6.55 kB
"""Universal Text Classification Dataset (UTCD)"""
import os
import json
from os.path import join as os_join
from typing import List
import datasets
from stefutil import *
_DESCRIPTION = """
UTCD is a compilation of 18 classification datasets spanning 3 categories of Sentiment,
Intent/Dialogue and Topic classification. UTCD focuses on the task of zero-shot text classification where the
candidate labels are descriptive of the text being classified. UTCD consists of ~ 6M/800K train/test examples.
"""
# TODO: citation
# Project homepage; surfaced as `homepage` in `DatasetInfo`
_URL = "https://github.com/ChrisIsKing/zero-shot-text-classification/tree/master"
# Zipped dataset files on the HuggingFace hub.
# NOTE(review): appears unused — `_split_generators` passes the hub-relative path
# 'datasets.zip' to `download_and_extract` instead; confirm which is intended.
_URL_ZIP = "https://huggingface.co/datasets/claritylab/UTCD/raw/main/datasets.zip"
# Shared version for all builder configs (see UtcdConfig.__init__)
_VERSION = datasets.Version('0.0.1')
class UtcdConfig(datasets.BuilderConfig):
    """BuilderConfig for UTCD."""
    def __init__(self, domain: str, normalize_aspect: bool = False, **kwargs):
        """Construct a UTCD builder config.

        Args:
            domain: `string`, dataset domain, one of [`in`, `out`].
            normalize_aspect: `bool`, if True, an aspect-normalized version of the dataset is returned.
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 0.0.1: Initial version.
        super().__init__(version=_VERSION, **kwargs)
        # Validate the domain flag up front; `ca.check_mismatch` raises on an unknown value
        ca.check_mismatch('Dataset Domain', domain, ['in', 'out'])
        self.domain = domain
        self.normalize_aspect = normalize_aspect
    def to_dir_name(self):
        """
        :return: directory name for the dataset files for this config stored on hub
        """
        base = {'in': 'in-domain', 'out': 'out-of-domain'}[self.domain]
        if self.normalize_aspect:
            return f'aspect-normalized-{base}'
        return base
# Per-dataset metadata (aspect, domain, per-split labels) read from the sidecar config file;
# queried via dotted paths, e.g. config('<dataset>.splits.<split>.labels')
config = StefConfig('config.json')
# mic(config('go_emotion'))
# Map UTCD split names to HuggingFace `datasets` split enums ('eval' -> VALIDATION)
_split2hf_split = dict(train=datasets.Split.TRAIN, eval=datasets.Split.VALIDATION, test=datasets.Split.TEST)
class Utcd(datasets.GeneratorBasedBuilder):
    """UTCD: Universal Text Classification Dataset. Version 0.0."""
    VERSION = _VERSION
    BUILDER_CONFIGS = [
        UtcdConfig(
            name='in-domain',
            description='All in-domain datasets.',
            domain='in',
            normalize_aspect=False
        ),
        UtcdConfig(
            name='aspect-normalized-in-domain',
            description='Aspect-normalized version of all in-domain datasets.',
            domain='in',
            normalize_aspect=True
        ),
        UtcdConfig(
            name='out-of-domain',
            description='All out-of-domain datasets.',
            domain='out',
            normalize_aspect=False
        ),
        UtcdConfig(
            name='aspect-normalized-out-of-domain',
            description='Aspect-normalized version of all out-of-domain datasets.',
            domain='out',
            normalize_aspect=True
        )
    ]
    DEFAULT_CONFIG_NAME = 'in-domain'

    def _get_dataset_names(self):
        """Names of the constituent datasets whose domain matches this config."""
        dom = self.config.domain
        return [nm for nm, meta in config().items() if meta['domain'] == dom]

    def _info(self):
        """Build the shared feature schema: the label vocabulary is the union of
        every constituent dataset's train & test labels, de-duplicated and sorted."""
        dnms = self._get_dataset_names()
        lb_set = set()
        for dnm in dnms:
            for split in ['train', 'test']:
                lb_set.update(config(f'{dnm}.splits.{split}.labels'))
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                text=datasets.Value(dtype='string'),
                # a Sequence of ClassLabel so multi-label examples are representable
                labels=datasets.Sequence(feature=datasets.ClassLabel(names=sorted(lb_set)), length=-1),
                dataset_name=datasets.ClassLabel(names=dnms)
            ),
            homepage=_URL
            # TODO: citation
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """One SplitGenerator per split; each receives the json file paths of every
        constituent dataset for that split."""
        # only the aspect-normalized variants ship a validation ('eval') split
        splits = ['train', 'test']
        if self.config.normalize_aspect:
            splits = ['train', 'eval', 'test']
        dnms = self._get_dataset_names()
        dir_nm = self.config.to_dir_name()
        # TODO: update root dataset naming version & dataset split naming
        base_path = dl_manager.download_and_extract('datasets.zip')
        # dataset name ordering is deterministic, hence so is the file path order
        ret = []
        for s in splits:
            paths = [os_join(base_path, f'{dir_nm}_split', dnm, f'{s}.json') for dnm in dnms]
            ret.append(datasets.SplitGenerator(name=_split2hf_split[s], gen_kwargs=dict(filepath=paths)))
        return ret

    def _generate_examples(self, filepath: List[str]):
        """Yield (id, example) pairs from each per-dataset split file.

        Each file is a json object mapping text to its label list; the owning
        dataset's name is recovered from the parent directory of the file path.
        """
        idx = 0
        for path in filepath:  # each file for one split of one dataset
            dnm = path.split(os.sep)[-2]
            with open(path, encoding='utf-8') as fl:
                dset = json.load(fl)
            for txt, lbs in dset.items():
                yield idx, dict(text=txt, labels=lbs, dataset_name=dnm)
                idx += 1