File size: 4,133 Bytes
4deba17
c7a9682
4deba17
 
 
13d54f1
4deba17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13d54f1
 
 
4deba17
72ffc94
4deba17
 
13d54f1
4deba17
13d54f1
4deba17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13d54f1
 
 
 
4deba17
 
 
13d54f1
c7a9682
4deba17
 
 
13d54f1
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
"""MC4_Legal"""
import ast
import json

import datasets
from huggingface_hub.file_download import hf_hub_url

try:
    import lzma as xz
except ImportError:
    import pylzma as xz

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """
"""

_CITATION = """
"""

_URL = "https://huggingface.co/datasets/joelito/mc4_legal"

_LANGUAGES = [
    "bg",
    "cs",
    "da",
    "de",
    "el",
    "en",
    "es",
    "et",
    "fi",
    "fr",
    "ga",
    # "hr", # hr is not present in mc4
    "hu",
    "it",
    "lt",
    "lv",
    "mt",
    "nl",
    "pl",
    "pt",
    "ro",
    "sk",
    "sl",
    "sv",
]


class MC4LegalConfig(datasets.BuilderConfig):
    """BuilderConfig for MC4_Legal."""

    def __init__(self, name: str, **kwargs):
        """Create a config for one language subset of MC4_Legal.

        Args:
            name: One of bg,cs,da,de,el,en,es,et,fi,fr,ga,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or all
          **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() is equivalent to super(MC4LegalConfig, self) on Python 3.
        super().__init__(**kwargs)
        # Stored after the parent __init__ so the subset name always wins.
        self.name = name


class MC4Legal(datasets.GeneratorBasedBuilder):
    """MC4_Legal: A Corpus Covering the Legal Part of MC4 for European Languages"""

    # One config per language plus an "all" config that concatenates every language.
    BUILDER_CONFIGS = [MC4LegalConfig(language) for language in _LANGUAGES + ["all"]]

    def _info(self):
        """Return the dataset metadata: feature schema, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "url": datasets.Value("string"),
                    "timestamp": datasets.Value("timestamp[s]"),
                    "matches": datasets.Sequence(datasets.Value("string")),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve the per-language .jsonl.xz shard URLs, download them, and expose a single TRAIN split."""

        def get_url(file_name):
            return hf_hub_url(repo_id="joelito/mc4_legal", filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")

        data_urls = []
        languages = _LANGUAGES if self.config.name == "all" else [self.config.name]
        for language in languages:
            if language in ["de", "en", "es"]:  # here we need to chunk because the files are too large
                data_urls.extend([get_url(f"{language}_{idx}") for idx in [0, 1]])
            else:
                data_urls.append(get_url(language))

        downloaded_files = dl_manager.download(data_urls)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files})]

    def _generate_examples(self, filepaths):
        """This function returns the examples in the raw (text) form by iterating on all the files.

        Yields:
            (id_, example) pairs where id_ is a running counter across all files
            and example matches the feature schema declared in _info().
        """
        id_ = 0
        for filepath in filepaths:
            logger.info("Generating examples from = %s", filepath)
            try:
                # Open by path so lzma owns (and closes) the underlying file handle;
                # wrapping an already-open file object would leak it on close.
                with xz.open(filepath, "rt", encoding="utf-8") as f:
                    for line in f:
                        if line:
                            example = json.loads(line)
                            if example is not None and isinstance(example, dict):
                                timestamp = example.get("timestamp", "")
                                # remove the Z at the end (time zone)
                                if isinstance(timestamp, str) and timestamp.endswith("Z"):
                                    timestamp = timestamp[:-1]
                                # "matches" may be missing, a stringified Python list,
                                # or already a list; literal_eval("") would raise SyntaxError.
                                raw_matches = example.get("matches") or []
                                if isinstance(raw_matches, str):
                                    raw_matches = ast.literal_eval(raw_matches)
                                yield id_, {
                                    # Default to None (not ""): the feature is int32 and
                                    # "" cannot be cast, whereas None becomes a null value.
                                    "index": example.get("index"),
                                    "url": example.get("url", ""),
                                    "timestamp": timestamp,
                                    "matches": raw_matches,
                                    "text": example.get("text", ""),
                                }
                                id_ += 1
            except Exception:
                # Best-effort: log the failing shard and keep generating from the rest.
                logger.exception("Error while processing file %s", filepath)