File size: 2,916 Bytes
7b0843f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bf0a4a2
 
7b0843f
 
eab2511
 
7b0843f
 
bf0a4a2
 
 
9236b43
7b0843f
bf0a4a2
 
7b0843f
bf0a4a2
 
 
 
 
 
 
 
 
9236b43
7b0843f
 
 
 
 
baa7db1
 
7b0843f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import csv

import datasets


# NOTE(review): this is the MMLU citation (Hendrycks et al., ICLR 2021) —
# it looks like a copy-paste leftover from another loading script; confirm
# the correct citation(s) for SimCat / SimLex999 and replace.
_CITATION = """\
@article{hendryckstest2021,
      title={Measuring Massive Multitask Language Understanding},
      author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
      journal={Proceedings of the International Conference on Learning Representations (ICLR)},
      year={2021}
    }
"""

# Short description shown on the dataset card.
_DESCRIPTION = """\
Psycholinguistics word datasets 
"""

# Placeholder — homepage URL not yet provided.
_HOMEPAGE = "To Add"

# Archive (relative path) expected to contain data/<split>/<subject>_<split>.csv members.
_URL = "data.tar"

# One BuilderConfig is generated per subject name listed here; the name is
# also used to locate the matching CSV file inside the archive.
_SUBJECTS = [
    "SimCat-TASLP2018",
    "SimLex999-COLI2015"
]


class MyDataset(datasets.GeneratorBasedBuilder):
    """Psycholinguistics word datasets.

    One builder configuration exists per entry in ``_SUBJECTS``. Each
    configuration exposes a single ``test`` split, streamed from
    ``data/test/<config_name>_test.csv`` inside the downloaded ``data.tar``
    archive. Every CSV row becomes an example with string fields ``word``
    (column 0) and ``category`` (column 1).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=sub,
            version=datasets.Version("1.0.0"),
            description=f"Psycholinguistics Vocabulary Datasets  {sub}",
        )
        for sub in _SUBJECTS
    ]

    def _info(self):
        """Return the DatasetInfo with the feature schema shared by all configs."""
        features = datasets.Features(
            {
                "word": datasets.Value("string"),
                "category": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        The archive is downloaded without extraction so its members can be
        streamed lazily via ``iter_archive`` in ``_generate_examples``.
        """
        archive = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # Keyword arguments forwarded to _generate_examples.
                gen_kwargs={"iter_archive": dl_manager.iter_archive(archive), "split": "test"},
            ),
        ]

    def _generate_examples(self, iter_archive, split):
        """Yields examples as (key, example) tuples.

        Args:
            iter_archive: iterator of ``(path, file_obj)`` pairs over the
                archive members, as produced by ``dl_manager.iter_archive``.
            split: split name (``"test"``), used to build the expected path.
        """
        n_yielded_files = 0
        for id_file, (path, file) in enumerate(iter_archive):
            # Only consider members under the split's folder, e.g. "data/test/".
            if f"data/{split}/" in path:
                # Match this config's CSV, e.g. "SimCat-TASLP2018_test.csv".
                if f"{self.config.name}_{split}.csv" in path:
                    n_yielded_files += 1
                    lines = (line.decode("utf-8") for line in file)
                    reader = csv.reader(lines)
                    for id_line, data in enumerate(reader):
                        # FIX: keys must match the schema declared in _info()
                        # ("word"/"category"); the capitalized "Word"/"Category"
                        # keys previously caused a schema mismatch on encoding.
                        yield f"{id_file}_{id_line}", {"word": data[0], "category": data[1]}
                    # NOTE(review): the "== 8" early-exit looks copied from a
                    # multi-file loader; with one CSV per config it never
                    # fires. Kept as-is to preserve behavior — confirm intent.
                    if n_yielded_files == 8:
                        break