Tasks: Text Classification
Modalities: Text
Sub-tasks: multi-class-classification
Languages: English
Size: 10K - 100K
Move task information to loading script
raft.py CHANGED
```diff
@@ -17,6 +17,7 @@
 import csv
 import json
 import os
+from pathlib import Path
 
 import datasets
 
```
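The new `pathlib` import only backs the commented-out directory listing in the next hunk; `DATA_DIR_PATH` itself is never defined in the committed script. A minimal sketch of what that debug line would need in order to run against a local checkout, with `Path("data")` as an assumed definition:

```python
from pathlib import Path

# Assumed definition: the commit references DATA_DIR_PATH without defining it.
DATA_DIR_PATH = Path("data")
# The commented-out debug print from the diff, runnable with the line above:
print([p for p in DATA_DIR_PATH.iterdir() if p.is_dir()])
```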
```diff
@@ -45,10 +46,186 @@ _LICENSE = ""
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 # This gets all folders within the directory named `data`
-
+DATA_DIR_URL = "data/"  # "https://huggingface.co/datasets/ought/raft/resolve/main/data/"
+# print([p for p in DATA_DIR_PATH.iterdir() if p.is_dir()])
+TASKS = {
+    "banking_77": {
+        "name": "banking_77",
+        "description": "",
+        "data_columns": ["Query", "ID"],
+        "label_columns": {
+            "Label": [
+                "Refund_not_showing_up",
+                "activate_my_card",
+                "age_limit",
+                "apple_pay_or_google_pay",
+                "atm_support",
+                "automatic_top_up",
+                "balance_not_updated_after_bank_transfer",
+                "balance_not_updated_after_cheque_or_cash_deposit",
+                "beneficiary_not_allowed",
+                "cancel_transfer",
+                "card_about_to_expire",
+                "card_acceptance",
+                "card_arrival",
+                "card_delivery_estimate",
+                "card_linking",
+                "card_not_working",
+                "card_payment_fee_charged",
+                "card_payment_not_recognised",
+                "card_payment_wrong_exchange_rate",
+                "card_swallowed",
+                "cash_withdrawal_charge",
+                "cash_withdrawal_not_recognised",
+                "change_pin",
+                "compromised_card",
+                "contactless_not_working",
+                "country_support",
+                "declined_card_payment",
+                "declined_cash_withdrawal",
+                "declined_transfer",
+                "direct_debit_payment_not_recognised",
+                "disposable_card_limits",
+                "edit_personal_details",
+                "exchange_charge",
+                "exchange_rate",
+                "exchange_via_app",
+                "extra_charge_on_statement",
+                "failed_transfer",
+                "fiat_currency_support",
+                "get_disposable_virtual_card",
+                "get_physical_card",
+                "getting_spare_card",
+                "getting_virtual_card",
+                "lost_or_stolen_card",
+                "lost_or_stolen_phone",
+                "order_physical_card",
+                "passcode_forgotten",
+                "pending_card_payment",
+                "pending_cash_withdrawal",
+                "pending_top_up",
+                "pending_transfer",
+                "pin_blocked",
+                "receiving_money",
+                "request_refund",
+                "reverted_card_payment?",
+                "supported_cards_and_currencies",
+                "terminate_account",
+                "top_up_by_bank_transfer_charge",
+                "top_up_by_card_charge",
+                "top_up_by_cash_or_cheque",
+                "top_up_failed",
+                "top_up_limits",
+                "top_up_reverted",
+                "topping_up_by_card",
+                "transaction_charged_twice",
+                "transfer_fee_charged",
+                "transfer_into_account",
+                "transfer_not_received_by_recipient",
+                "transfer_timing",
+                "unable_to_verify_identity",
+                "verify_my_identity",
+                "verify_source_of_funds",
+                "verify_top_up",
+                "virtual_card_not_working",
+                "visa_or_mastercard",
+                "why_verify_identity",
+                "wrong_amount_of_cash_received",
+                "wrong_exchange_rate_for_cash_withdrawal",
+            ]
+        },
+    },
+    "medical_subdomain_of_clinical_notes": {
+        "name": "medical_subdomain_of_clinical_notes",
+        "description": "",
+        "data_columns": ["Note", "ID"],
+        "label_columns": {
+            "Label": ["cardiology", "gastroenterology", "nephrology", "neurology", "psychiatry", "pulmonary disease"]
+        },
+    },
+    "overruling": {
+        "name": "overruling",
+        "description": "",
+        "data_columns": ["Sentence", "ID"],
+        "label_columns": {"Label": ["not overruling", "overruling"]},
+    },
+    "gpai_initiatives": {
+        "name": "gpai_initiatives",
+        "description": "",
+        "data_columns": [
+            "Name",
+            "Link",
+            "Organization / Author",
+            "Brief Description",
+            "Sector",
+            "Geographical scope",
+            "Target Audience",
+            "Stage of Development",
+            "Date started",
+            "Country/region of origin",
+            "Notes (including specific SDG(s) and OECD AI Principles addressed)",
+            "ID",
+        ],
+        "label_columns": {
+            "Label: AI and Ethics": ["0", "1"],
+            "Label: AI and Governance": ["0", "1"],
+            "Label: AI and Social Good": ["0", "1"],
+        },
+    },
+    "semiconductor_org_types": {
+        "name": "semiconductor_org_types",
+        "description": "",
+        "data_columns": ["Paper title", "Organization name", "ID"],
+        "label_columns": {"Label": ["company", "research institute", "university"]},
+    },
+    "twitter_complaints": {
+        "name": "twitter_complaints",
+        "description": "",
+        "data_columns": ["Tweet text", "ID"],
+        "label_columns": {"Label": ["complaint", "no complaint"]},
+    },
+    "neurips_impact_statement_risks": {
+        "name": "neurips_impact_statement_risks",
+        "description": "",
+        "data_columns": ["Paper title", "Paper link", "Impact statement", "ID"],
+        "label_columns": {"Label": ["doesn't mention a harmful application", "mentions a harmful application"]},
+    },
+    "systematic_review_inclusion": {
+        "name": "systematic_review_inclusion",
+        "description": "",
+        "data_columns": ["Title", "Abstract", "Authors", "Journal", "ID"],
+        "label_columns": {"Label": ["included", "not included"]},
+    },
+    "terms_of_service": {
+        "name": "terms_of_service",
+        "description": "",
+        "data_columns": ["Sentence", "ID"],
+        "label_columns": {"Label": ["not potentially unfair", "potentially unfair"]},
+    },
+    "tai_safety_research": {
+        "name": "tai_safety_research",
+        "description": "",
+        "data_columns": [
+            "Title",
+            "Abstract Note",
+            "Url",
+            "Publication Year",
+            "Item Type",
+            "Author",
+            "Publication Title",
+            "ID",
+        ],
+        "label_columns": {"Label": ["TAI safety research", "not TAI safety research"]},
+    },
+    "one_stop_english": {
+        "name": "one_stop_english",
+        "description": "",
+        "data_columns": ["Text", "ID"],
+        "label_columns": {"Label": ["advanced", "elementary", "intermediate"]},
+    },
+}
 
-_URLs = {s: {
-    'test': f"data/{s}/test_unlabeled.csv"} for s in DATA_DIRS}
+_URLs = {s: {"train": f"{DATA_DIR_URL}{s}/train.csv", "test": f"{DATA_DIR_URL}{s}/test_unlabeled.csv"} for s in TASKS}
 
 
 class Raft(datasets.GeneratorBasedBuilder):
```
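The rewritten `_URLs` comprehension pairs every key of `TASKS` with relative paths that the `datasets` library resolves against the dataset repository. An illustrative literal expansion for two of the configs (not part of the commit):

```python
# Equivalent literal form of the _URLs comprehension for two tasks;
# each config gets a train CSV and an unlabeled test CSV under data/.
_URLs = {
    "banking_77": {
        "train": "data/banking_77/train.csv",
        "test": "data/banking_77/test_unlabeled.csv",
    },
    "overruling": {
        "train": "data/overruling/train.csv",
        "test": "data/overruling/test_unlabeled.csv",
    },
}
```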
```diff
@@ -67,36 +244,28 @@ class Raft(datasets.GeneratorBasedBuilder):
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
-
-    # TODO: Load task jsons
-
-    tasks = {}
-    for sd in DATA_DIRS:
-        with open(os.path.join('data', sd, 'task.json')) as f:
-            task_data = json.load(f)
-            tasks[sd] = task_data
-
     BUILDER_CONFIGS = []
-    for key in tasks:
-        td = tasks[key]
-        name = td['name']
-        description = td['description']
-        BUILDER_CONFIGS.append(datasets.BuilderConfig(name=name, version=VERSION,
-                                                      description=description))
 
-
+    for key in TASKS:
+        td = TASKS[key]
+        name = td["name"]
+        description = td["description"]
+        BUILDER_CONFIGS.append(datasets.BuilderConfig(name=name, version=VERSION, description=description))
+
+    DEFAULT_CONFIG_NAME = (
+        "tai_safety_research"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    )
 
     def _info(self):
         DEFAULT_LABEL_NAME = "Unlabeled"
 
-        task = tasks[self.config.name]
+        task = TASKS[self.config.name]
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        data_columns = {col_name: datasets.Value("string") for col_name in
-                        task['data_columns']}
+        data_columns = {col_name: datasets.Value("string") for col_name in task["data_columns"]}
 
         label_columns = {}
-        for label_name in task['label_columns']:
-            labels = ["Unlabeled"] + task['label_columns'][label_name]
+        for label_name in task["label_columns"]:
+            labels = ["Unlabeled"] + task["label_columns"][label_name]
             label_columns[label_name] = datasets.ClassLabel(len(labels), labels)
 
         # Merge dicts
```
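In `_info`, every label column becomes a `datasets.ClassLabel` with `"Unlabeled"` prepended, so unlabeled test rows map to class id 0. A small sketch of the resulting feature for the `overruling` config (label names taken from `TASKS`; the printed ids are standard `ClassLabel` behaviour):

```python
import datasets

# Label feature as built in _info() for the "overruling" config:
labels = ["Unlabeled"] + ["not overruling", "overruling"]
label_feature = datasets.ClassLabel(len(labels), labels)

print(label_feature.str2int("overruling"))  # 2
print(label_feature.int2str(0))             # "Unlabeled"
```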
```diff
@@ -130,27 +299,26 @@
         data_dir = dl_manager.download_and_extract(_URLs)
         dataset = self.config.name.split("-")[0]
         return [
-            datasets.SplitGenerator(
-
-
-            datasets.SplitGenerator(
-
-
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir[dataset]["train"], "split": "train"}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir[dataset]["test"], "split": "test"}
+            ),
         ]
 
     def _generate_examples(
-
+        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     ):
-        """
+        """Yields examples as (key, example) tuples."""
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
-        task = tasks[self.config.name]
-        labels = list(task['label_columns'])
+        task = TASKS[self.config.name]
+        labels = list(task["label_columns"])
 
         with open(filepath, encoding="utf-8") as f:
-            csv_reader = csv.reader(f, quotechar='"', delimiter=",",
-                                    quoting=csv.QUOTE_ALL, skipinitialspace=True)
+            csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
             column_names = next(csv_reader)
             # Test csvs don't have any label columns.
             if split == "test":
```
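The tail of `_generate_examples` is not shown in this hunk, so the following is only a sketch of the row handling it sets up: the first CSV row supplies the column names, and each subsequent row would be zipped into a dict keyed by those names (the `test` branch presumably fills the missing label columns with the `"Unlabeled"` default):

```python
import csv

def iter_examples(filepath):
    # Sketch, assuming a labeled CSV whose header matches the task's columns;
    # the committed script's handling of the unlabeled test split is not shown
    # in this diff.
    with open(filepath, encoding="utf-8") as f:
        csv_reader = csv.reader(f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True)
        column_names = next(csv_reader)
        for key, row in enumerate(csv_reader):
            yield key, dict(zip(column_names, row))
```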
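With the task metadata inlined, each key of `TASKS` is a loadable config. A usage sketch (the `ought/raft` repo id is inferred from the commented-out resolve URL in the loading script):

```python
import datasets

# Load one RAFT task by config name.
banking = datasets.load_dataset("ought/raft", "banking_77")
print(banking["train"][0])           # e.g. {"Query": ..., "ID": ..., "Label": ...}
print(banking["test"][0]["Label"])   # 0, i.e. "Unlabeled", on the test split
```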