"""Universal Text Classification Dataset (UTCD)"""
import os
import json
from os.path import join as os_join
from typing import List

import datasets
from huggingface_hub import hf_hub_download


_DESCRIPTION = """
UTCD is a compilation of 18 classification datasets spanning 3 categories: Sentiment,
Intent/Dialogue, and Topic classification. UTCD focuses on the task of zero-shot text classification, where the
candidate labels are descriptive of the text being classified. UTCD consists of ~6M/800K train/test examples.
"""
# TODO: citation
_URL = "https://github.com/ChrisIsKing/zero-shot-text-classification/tree/master"
_URL_ZIP = "https://huggingface.co/datasets/claritylab/UTCD/raw/main/datasets.zip"
_VERSION = datasets.Version('0.0.1')


class UtcdConfig(datasets.BuilderConfig):
"""BuilderConfig for SuperGLUE."""
def __init__(self, domain: str, normalize_aspect: bool = False, **kwargs):
"""BuilderConfig for UTCD.
Args:
domain: `string`, dataset domain, one of [`in`, `out`].
normalize_aspect: `bool`, if True, an aspect-normalized version of the dataset is returned.
**kwargs: keyword arguments forwarded to super.
"""
# Version history:
# 0.0.1: Initial version.
super(UtcdConfig, self).__init__(version=_VERSION, **kwargs)
assert domain in ['in', 'out']
self.domain = domain
        self.normalize_aspect = normalize_aspect

    def to_dir_name(self):
"""
:return: directory name for the dataset files for this config stored on hub
"""
domain_str = 'in-domain' if self.domain == 'in' else 'out-of-domain'
prefix = 'aspect-normalized-' if self.normalize_aspect else ''
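        # Illustration of the mapping (derived from the logic above):
        #   ('in',  False) -> 'in-domain'
        #   ('in',  True)  -> 'aspect-normalized-in-domain'
        #   ('out', False) -> 'out-of-domain'
        #   ('out', True)  -> 'aspect-normalized-out-of-domain'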
        return f'{prefix}{domain_str}'


# To make the dataset viewer work on the hub: we don't have write access to `/.cache`,
# so cache the downloaded config file next to this script instead
config_fnm = hf_hub_download(
repo_id='claritylab/utcd', filename='_utcd_info.json', cache_dir=os.path.dirname(__file__), repo_type='dataset'
)
with open(config_fnm) as f:
_config = json.load(f)
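# `_config` is assumed to map each dataset name to its metadata; based on the
# accesses in this script, a (hypothetical) entry would look like:
#   'emotion': {'domain': 'in', 'aspect': 'sentiment',
#               'splits': {'train': {'labels': [...]}, 'test': {'labels': [...]}}}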
_split2hf_split = dict(train=datasets.Split.TRAIN, eval=datasets.Split.VALIDATION, test=datasets.Split.TEST)


class Utcd(datasets.GeneratorBasedBuilder):
"""UTCD: Universal Text Classification Dataset. Version 0.0."""
VERSION = _VERSION
BUILDER_CONFIGS = [
UtcdConfig(
name='in-domain',
description='All in-domain datasets.',
domain='in',
normalize_aspect=False
),
UtcdConfig(
name='aspect-normalized-in-domain',
description='Aspect-normalized version of all in-domain datasets.',
domain='in',
normalize_aspect=True
),
UtcdConfig(
name='out-of-domain',
description='All out-of-domain datasets.',
domain='out',
normalize_aspect=False
),
UtcdConfig(
name='aspect-normalized-out-of-domain',
description='Aspect-normalized version of all out-of-domain datasets.',
domain='out',
normalize_aspect=True
)
]
    DEFAULT_CONFIG_NAME = 'in-domain'

    def _get_dataset_names(self):
        return [dnm for dnm, d_dset in _config.items() if d_dset['domain'] == self.config.domain]

    def _info(self):
dnms = self._get_dataset_names()
# labels = [_config[dnm]['splits'][split]['labels'] for dnm in dnms for split in ['train', 'test']]
# labels = sorted(set().union(*labels)) # drop duplicate labels across datasets
        aspects = [d['aspect'] for d in _config.values()]
aspects = sorted(set(aspects)) # drop duplicate aspects
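        # A generated record under the features below might look like (hypothetical
        # values): {'text': 'the movie was great', 'labels': ['positive'],
        #           'dataset_name': 'emotion', 'aspect': 'sentiment'}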
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
text=datasets.Value(dtype='string'),
# labels=datasets.Sequence(feature=datasets.ClassLabel(names=labels), length=-1), # for multi-label
labels=datasets.Sequence(feature=datasets.Value(dtype='string'), length=-1),
dataset_name=datasets.ClassLabel(names=dnms),
aspect=datasets.ClassLabel(names=aspects)
),
homepage=_URL
# TODO: citation
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
# for aspect-normalized versions of the dataset, we include a validation set
splits = ['train', 'eval', 'test'] if self.config.normalize_aspect else ['train', 'test']
dnms = self._get_dataset_names()
dir_nm = self.config.to_dir_name()
# TODO: update root dataset naming version & dataset split naming
base_path = dl_manager.download_and_extract('datasets.zip')
split2paths = {s: [os_join(base_path, f'{dir_nm}_split', dnm, f'{s}.json') for dnm in dnms] for s in splits}
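        # Illustrative layout under the extracted archive, for a hypothetical
        # dataset `emotion` under the `in-domain` config:
        #   <base_path>/in-domain_split/emotion/train.json
        #   <base_path>/in-domain_split/emotion/test.json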
        # dataset name ordering is deterministic, so the order of file paths (and thus of examples) is too
return [
datasets.SplitGenerator(name=_split2hf_split[s], gen_kwargs=dict(filepath=split2paths[s])) for s in splits
        ]

    def _generate_examples(self, filepath: List[str]):
id_ = 0
for path in filepath: # each file for one split of one dataset
            dnm = path.split(os.sep)[-2]  # dataset name is the parent directory of the split file
aspect = _config[dnm]['aspect']
with open(path, encoding='utf-8') as fl:
dset = json.load(fl)
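                # each split file is assumed to map a text to its list of gold
                # labels, e.g. a hypothetical entry:
                #   {'i need to book a flight': ['book flight']}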
for txt, labels in dset.items():
yield id_, dict(text=txt, labels=labels, dataset_name=dnm, aspect=aspect)
id_ += 1
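

if __name__ == '__main__':
    # Minimal usage sketch: the repo id `claritylab/UTCD` is an assumption based
    # on the hub URLs above; the actual id may differ.
    dset = datasets.load_dataset('claritylab/UTCD', name='in-domain')
    print(dset['train'][0])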