import csv

import datasets


_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
|
_DESCRIPTION = """
"""

_HOMEPAGE = ""

_LICENSE = ""

DATA_DIR_URL = "data/"
|
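# Dummy predictions for each task are expected at data/<task_name>/predictions.csv
# inside the submission repository (see DATA_DIR_URL above and _URLs below). TASKS
# records, for every RAFT task, its data columns and the label names used to build
# the task's ClassLabel feature in _info().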
TASKS = {
    "ade_corpus_v2": {
        "name": "ade_corpus_v2",
        "description": "",
        "data_columns": ["Sentence", "ID"],
        "label_columns": {"Label": ["ADE-related", "not ADE-related"]},
    },
    "banking_77": {
        "name": "banking_77",
        "description": "",
        "data_columns": ["Query", "ID"],
        "label_columns": {
            "Label": [
                "Refund_not_showing_up",
                "activate_my_card",
                "age_limit",
                "apple_pay_or_google_pay",
                "atm_support",
                "automatic_top_up",
                "balance_not_updated_after_bank_transfer",
                "balance_not_updated_after_cheque_or_cash_deposit",
                "beneficiary_not_allowed",
                "cancel_transfer",
                "card_about_to_expire",
                "card_acceptance",
                "card_arrival",
                "card_delivery_estimate",
                "card_linking",
                "card_not_working",
                "card_payment_fee_charged",
                "card_payment_not_recognised",
                "card_payment_wrong_exchange_rate",
                "card_swallowed",
                "cash_withdrawal_charge",
                "cash_withdrawal_not_recognised",
                "change_pin",
                "compromised_card",
                "contactless_not_working",
                "country_support",
                "declined_card_payment",
                "declined_cash_withdrawal",
                "declined_transfer",
                "direct_debit_payment_not_recognised",
                "disposable_card_limits",
                "edit_personal_details",
                "exchange_charge",
                "exchange_rate",
                "exchange_via_app",
                "extra_charge_on_statement",
                "failed_transfer",
                "fiat_currency_support",
                "get_disposable_virtual_card",
                "get_physical_card",
                "getting_spare_card",
                "getting_virtual_card",
                "lost_or_stolen_card",
                "lost_or_stolen_phone",
                "order_physical_card",
                "passcode_forgotten",
                "pending_card_payment",
                "pending_cash_withdrawal",
                "pending_top_up",
                "pending_transfer",
                "pin_blocked",
                "receiving_money",
                "request_refund",
                "reverted_card_payment?",
                "supported_cards_and_currencies",
                "terminate_account",
                "top_up_by_bank_transfer_charge",
                "top_up_by_card_charge",
                "top_up_by_cash_or_cheque",
                "top_up_failed",
                "top_up_limits",
                "top_up_reverted",
                "topping_up_by_card",
                "transaction_charged_twice",
                "transfer_fee_charged",
                "transfer_into_account",
                "transfer_not_received_by_recipient",
                "transfer_timing",
                "unable_to_verify_identity",
                "verify_my_identity",
                "verify_source_of_funds",
                "verify_top_up",
                "virtual_card_not_working",
                "visa_or_mastercard",
                "why_verify_identity",
                "wrong_amount_of_cash_received",
                "wrong_exchange_rate_for_cash_withdrawal",
            ]
        },
    },
    "terms_of_service": {
        "name": "terms_of_service",
        "description": "",
        "data_columns": ["Sentence", "ID"],
        "label_columns": {"Label": ["not potentially unfair", "potentially unfair"]},
    },
    "tai_safety_research": {
        "name": "tai_safety_research",
        "description": "",
        "data_columns": [
            "Title",
            "Abstract Note",
            "Url",
            "Publication Year",
            "Item Type",
            "Author",
            "Publication Title",
            "ID",
        ],
        "label_columns": {"Label": ["TAI safety research", "not TAI safety research"]},
    },
    "neurips_impact_statement_risks": {
        "name": "neurips_impact_statement_risks",
        "description": "",
        "data_columns": ["Paper title", "Paper link", "Impact statement", "ID"],
        "label_columns": {"Label": ["doesn't mention a harmful application", "mentions a harmful application"]},
    },
    "overruling": {
        "name": "overruling",
        "description": "",
        "data_columns": ["Sentence", "ID"],
        "label_columns": {"Label": ["not overruling", "overruling"]},
    },
    "systematic_review_inclusion": {
        "name": "systematic_review_inclusion",
        "description": "",
        "data_columns": ["Title", "Abstract", "Authors", "Journal", "ID"],
        "label_columns": {"Label": ["included", "not included"]},
    },
    "one_stop_english": {
        "name": "one_stop_english",
        "description": "",
        "data_columns": ["Article", "ID"],
        "label_columns": {"Label": ["advanced", "elementary", "intermediate"]},
    },
    "tweet_eval_hate": {
        "name": "tweet_eval_hate",
        "description": "",
        "data_columns": ["Tweet", "ID"],
        "label_columns": {"Label": ["hate speech", "not hate speech"]},
    },
    "twitter_complaints": {
        "name": "twitter_complaints",
        "description": "",
        "data_columns": ["Tweet text", "ID"],
        "label_columns": {"Label": ["complaint", "no complaint"]},
    },
    "semiconductor_org_types": {
        "name": "semiconductor_org_types",
        "description": "",
        "data_columns": ["Paper title", "Organization name", "ID"],
        "label_columns": {"Label": ["company", "research institute", "university"]},
    },
}
|
# Relative path of each task's predictions file inside the submission repository.
_URLs = {s: {"test": f"{DATA_DIR_URL}{s}/predictions.csv"} for s in TASKS}
|
|
class RaftSubmission(datasets.GeneratorBasedBuilder):
    """RAFT Dataset dummy predictions."""

    VERSION = datasets.Version("1.1.0")
|
    # One BuilderConfig per RAFT task, so each task can be loaded as a separate config.
    BUILDER_CONFIGS = []
    for key in TASKS:
        td = TASKS[key]
        name = td["name"]
        description = td["description"]
        BUILDER_CONFIGS.append(datasets.BuilderConfig(name=name, version=VERSION, description=description))

    DEFAULT_CONFIG_NAME = "tai_safety_research"
|
    def _info(self):
        # A placeholder class is prepended to every task's label list so that rows
        # without a prediction can still be encoded by the ClassLabel feature.
        DEFAULT_LABEL_NAME = "Unlabeled"

        task = TASKS[self.config.name]
        data_columns = {"ID": datasets.Value("string")}

        # Build one ClassLabel feature per label column, with "Unlabeled" as class 0.
        label_columns = {}
        for label_name in task["label_columns"]:
            labels = [DEFAULT_LABEL_NAME] + task["label_columns"][label_name]
            label_columns[label_name] = datasets.ClassLabel(len(labels), labels)

        features = datasets.Features(**data_columns, **label_columns)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Resolve the predictions files and expose the selected task's file as a
        # single "test" split.
        data_dir = dl_manager.download_and_extract(_URLs)
        dataset = self.config.name
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir[dataset]["test"], "split": "test"}
            )
        ]
|
    def _generate_examples(self, filepath, split):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
            # The header row provides the feature names; every following row becomes one example.
            column_names = next(csv_reader)
            for id_, row in enumerate(csv_reader):
                yield id_, {name: value for name, value in zip(column_names, row)}
|
|
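# Usage sketch (illustrative only): assuming this script sits at the root of a RAFT
# submission repository as, e.g., "raft_submission.py" (the filename is a placeholder)
# and a datasets version that supports script-based loading, a single task's
# predictions could be inspected with:
#
#     from datasets import load_dataset
#
#     preds = load_dataset("raft_submission.py", "ade_corpus_v2", split="test")
#     print(preds.features["Label"].names)  # ["Unlabeled", "ADE-related", "not ADE-related"]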