# Dataset card metadata (from the hub page):
#   Modalities: Text
#   Size: 10K - 100K rows
import csv | |
import datasets | |
_CITATION = """\ | |
@article{hendryckstest2021, | |
title={Measuring Massive Multitask Language Understanding}, | |
author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt}, | |
journal={Proceedings of the International Conference on Learning Representations (ICLR)}, | |
year={2021} | |
} | |
""" | |
_DESCRIPTION = """\ | |
Psycholinguistics word datasets | |
""" | |
_HOMEPAGE = "To Add" | |
_URL = "data.tar" | |
_SUBJECTS = [ | |
"SimCat-TASLP2018", | |
"SimLex999-COLI2015" | |
] | |
class MyDataset(datasets.GeneratorBasedBuilder):
    """Psycholinguistics word datasets.

    One builder config per entry in ``_SUBJECTS``; each config reads a
    ``<subject>_<split>.csv`` file from the downloaded archive and yields
    (word, category) examples.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=sub,
            version=datasets.Version("1.0.0"),
            description=f"Psycholinguistics Vocabulary Datasets {sub}",
        )
        for sub in _SUBJECTS
    ]

    def _info(self):
        """Return dataset metadata: features, description, homepage, citation."""
        features = datasets.Features(
            {
                "word": datasets.Value("string"),
                "category": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and declare the single TEST split."""
        archive = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # Kwargs forwarded to _generate_examples; the archive is
                # streamed rather than extracted.
                gen_kwargs={"iter_archive": dl_manager.iter_archive(archive), "split": "test"},
            ),
        ]

    def _generate_examples(self, iter_archive, split):
        """Yield (key, example) tuples from the matching CSV in the archive.

        Scans the archive for ``data/<split>/.../<config-name>_<split>.csv``
        (e.g. ``SimCat-TASLP2018_test.csv``) and emits one example per row.
        Assumes the CSV has no header row — TODO confirm against the data.
        """
        n_yielded_files = 0
        for id_file, (path, file) in enumerate(iter_archive):
            # Only look inside the folder for the requested split, e.g. "data/test/".
            if f"data/{split}/" in path:
                if f"{self.config.name}_{split}.csv" in path:
                    n_yielded_files += 1
                    lines = (line.decode("utf-8") for line in file)
                    reader = csv.reader(lines)
                    for id_line, data in enumerate(reader):
                        # BUG FIX: example keys must match the feature names
                        # declared in _info() ("word"/"category"), not the
                        # capitalized "Word"/"Category" the original yielded.
                        yield f"{id_file}_{id_line}", {"word": data[0], "category": data[1]}
                    # Stop early once enough files have been consumed.
                    if n_yielded_files == 8:
                        break