# coding=utf-8
# Copyright 2023 Devrim Cavusoglu and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Wiki Long Subset."""


import json

import datasets

logger = datasets.logging.get_logger(__name__)


_DESCRIPTION = """\
Dataset consisting of long Wikipedia articles.
"""

_URLS = {
    "train": [f"train/partition_{i}.jsonl" for i in range(12)],
    "test": "test/partition_0.jsonl",
}
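
# Each partition is a JSON Lines file: one article per line, with keys matching
# the features declared in WikiLongDatasetConfig below. An illustrative
# (hypothetical) record:
#   {"id": "12", "url": "https://en.wikipedia.org/wiki/Anarchism",
#    "title": "Anarchism", "text": "Anarchism is a political philosophy ..."}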


class WikiLongDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for Dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for Dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)

    @property
    def features(self):
        return {
            "id": datasets.Value("string"),
            "url": datasets.Value("string"),
            "title": datasets.Value("string"),
            "text": datasets.Value("string"),
        }


class WikiLongDataset(datasets.GeneratorBasedBuilder):
    """WikiLongDataset Classification dataset. Version 1.0."""

    BUILDER_CONFIGS = [
        WikiLongDatasetConfig(
            version=datasets.Version("1.0.0", ""),
            description="Long Wikipedia Articles"
        )
    ]
    BUILDER_CONFIG_CLASS = WikiLongDatasetConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(self.config.features),
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
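        # download_and_extract mirrors the nesting of _URLS, so data_dir is
        # {"train": [<local paths to the 12 shards>], "test": "<local path>"}.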

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yields (key, example) tuples from one or more JSON Lines files."""
        logger.info("generating examples from = %s", filepath)
        if isinstance(filepath, str):
            # The test split is a single file; normalize to a list so both
            # splits can be iterated the same way.
            filepath = [filepath]
        # Keys must be unique across the whole split, so keep one running
        # counter across files instead of enumerating per file.
        key = 0
        for path in filepath:
            with open(path, encoding="utf-8") as data:
                for article in data:
                    yield key, json.loads(article)
                    key += 1
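

if __name__ == "__main__":
    # Minimal smoke test; a sketch rather than part of the loading script.
    # It assumes the partition files listed in _URLS sit next to this script,
    # loads both splits through this builder, and prints one training row.
    dataset = datasets.load_dataset(__file__)
    print(dataset["train"][0])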