# Build the SemEval-2020 Task 4 (ComVE) subtask B dataset and push it to the Hugging Face Hub.
import pandas as pd
from rich import print
from datasets import DatasetDict, Value, ClassLabel, Dataset, Features
def load_dataset(input_url, label_url):
    """Load one ComVE subtask-B split from paired input/label CSV files.

    Parameters
    ----------
    input_url : str
        Path or URL of the data CSV; its first row is treated as a header
        by ``pd.read_csv`` and the columns are renamed below.
    label_url : str
        Path or URL of the header-less gold-answer CSV (id, label).

    Returns
    -------
    datasets.Dataset
        One row per question; 'label' is a ClassLabel over {'A', 'B', 'C'}
        (ClassLabel encodes the raw letter strings to 0/1/2).

    Raises
    ------
    ValueError
        If the two files do not list the same ids in the same order —
        rows are joined positionally below, so a mismatch would silently
        pair questions with the wrong labels.
    """
    input_df = pd.read_csv(input_url)
    label_df = pd.read_csv(label_url, header=None)
    input_df.columns = ['id', 'context', 'answerA', 'answerB', 'answerC']
    label_df.columns = ['id', 'label']
    # Guard the positional join: both files must enumerate identical ids.
    if not input_df['id'].reset_index(drop=True).equals(
            label_df['id'].reset_index(drop=True)):
        raise ValueError(
            'input and label files have mismatched ids: '
            f'{input_url} vs {label_url}')
    features = Features({
        'context': Value('string'),
        'answerA': Value('string'),
        'answerB': Value('string'),
        'answerC': Value('string'),
        'label': ClassLabel(num_classes=3, names=['A', 'B', 'C']),
    })
    dataset = Dataset.from_dict({
        'context': input_df['context'],
        'answerA': input_df['answerA'],
        'answerB': input_df['answerB'],
        'answerC': input_df['answerC'],
        'label': label_df['label']
    }, features=features)
    return dataset
# Raw CSVs from the official ComVE (SemEval-2020 Task 4) repository.
# NOTE: the doubled '%20%20' in 'Training  Data' mirrors the actual
# directory name in that repo — do not "fix" it.
_BASE = ('https://raw.githubusercontent.com/wangcunxiang/'
         'SemEval2020-Task4-Commonsense-Validation-and-Explanation/'
         'master/ALL%20data')
# split name -> (input CSV, gold-answer CSV), relative to _BASE.
_SPLIT_FILES = {
    'train': ('Training%20%20Data/subtaskB_data_all.csv',
              'Training%20%20Data/subtaskB_answers_all.csv'),
    'validation': ('Dev%20Data/subtaskB_dev_data.csv',
                   'Dev%20Data/subtaskB_gold_answers.csv'),
    'test': ('Test%20Data/subtaskB_test_data.csv',
             'Test%20Data/subtaskB_gold_answers.csv'),
}
# Assemble all three splits into a single DatasetDict.
dataset = DatasetDict({
    split: load_dataset(f'{_BASE}/{data_file}', f'{_BASE}/{gold_file}')
    for split, (data_file, gold_file) in _SPLIT_FILES.items()
})
print(dataset)
dataset.push_to_hub('extraordinarylab/comve_taskb')