""" TweetTopicMultilingual Dataset """
import json
from typing import List

import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[TweetTopicMultilingual](TBA)"""
_VERSION = "0.0.91"
_CITATION = """TBA"""
_HOME_PAGE = "https://cardiffnlp.github.io"
_NAME = "tweet_topic_multilingual"
_ROOT_URL = f"https://huggingface.co/datasets/cardiffnlp/{_NAME}/resolve/main/dataset"
_LANGUAGES = ["en", "es", "ja", "gr"]  # "gr" is the identifier used for Greek in the dataset files
_CLASS_MAPPING = {
    "en": [
        "Arts & Culture",
        "Business & Entrepreneurs",
        "Celebrity & Pop Culture",
        "Diaries & Daily Life",
        "Family",
        "Fashion & Style",
        "Film, TV & Video",
        "Fitness & Health",
        "Food & Dining",
        "Learning & Educational",
        "News & Social Concern",
        "Relationships",
        "Science & Technology",
        "Youth & Student Life",
        "Music",
        "Gaming",
        "Sports",
        "Travel & Adventure",
        "Other Hobbies"
    ],
    "gr": [
        "Τέχνες & Πολιτισμός",
        "Επιχειρήσεις & Επιχειρηματίες",
        "Διασημότητες & Ποπ κουλτούρα",
        "Ημερολόγια & Καθημερινή ζωή",
        "Οικογένεια",
        "Μόδα & Στυλ",
        "Ταινίες, τηλεόραση & βίντεο",
        "Γυμναστική & Υεία",
        "Φαγητό & Δείπνο",
        "Μάθηση & Εκπαίδευση",
        "Ειδήσεις & Κοινωνία",
        "Σχέσεις",
        "Επιστήμη & Τεχνολογία",
        "Νεανική & Φοιτητική ζωή",
        "Μουσική",
        "Παιχνίδια",
        "Αθλητισμός",
        "Ταξίδια & Περιπέτεια",
        "Άλλα χόμπι"
    ],
    "es": [
        "Arte y cultura",
        "Negocios y emprendedores",
        "Celebridades y cultura pop",
        "Diarios y vida diaria",
        "Familia",
        "Moda y estilo",
        "Cine, televisión y video",
        "Estado físico y salud",
        "Comida y comedor",
        "Aprendizaje y educación",
        "Noticias e interés social",
        "Relaciones",
        "Ciencia y Tecnología",
        "Juventud y Vida Estudiantil",
        "Música",
        "Juegos",
        "Deportes",
        "Viajes y aventuras",
        "Otros pasatiempos"
    ],
    "ja": [
        "アート&カルチャー",
        "ビジネス",
        "芸能",
        "日常",
        "家族",
        "ファッション",
        "映画&ラジオ",
        "フィットネス&健康",
        "料理",
        "教育関連",
        "社会",
        "人間関係",
        "サイエンス",
        "学校",
        "音楽",
        "ゲーム",
        "スポーツ",
        "旅行",
        "その他"
    ]
}
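# Note: only the English label names are used for the ClassLabel feature in _info();
# the other language lists are parallel translations of the same topic set.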

_URL = {}
# plain split
for lan in _LANGUAGES:
    _URL[lan] = {split: [f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl"] for split in ["train", "test", "validation"]}
_URL["en_2022"] = {split: [f"{_ROOT_URL}/en_2022/{split}.jsonl"] for split in ["train", "validation"]}
_URL["mix"] = {
    split: [f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for lan in _LANGUAGES] for split in ["train", "validation"]
}
_URL["mix_2022"] = {
    split: [f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for lan in _LANGUAGES] + [f"{_ROOT_URL}/en_2022/{split}.jsonl"]
    for split in ["train", "validation"]
}
# cross validation
for lan in _LANGUAGES:
    _URL.update({
        f"{lan}_cross_validation_{n}": {
            split: [f"{_ROOT_URL}/{lan}/cross_validation/{lan}_{split}_{n}.jsonl"]
            for split in ["train", "test", "validation"]
        } for n in range(5)
    })
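
# The resulting config names are:
#   "en", "es", "ja", "gr"           -> per-language train/test/validation splits
#   "en_2022"                        -> additional English 2022 data (train/validation only)
#   "mix", "mix_2022"                -> all languages combined (train/validation only)
#   "{lan}_cross_validation_{0..4}"  -> 5-fold cross-validation splits per language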


class Config(datasets.BuilderConfig):
    """BuilderConfig for TweetTopicMultilingual."""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class TweetTopicMultilingual(datasets.GeneratorBasedBuilder):
    """TweetTopicMultilingual dataset builder."""

    BUILDER_CONFIGS = [
        Config(name=i, version=datasets.Version(_VERSION), description=_DESCRIPTION) for i in _URL.keys()
    ]

    def _split_generators(self, dl_manager):
        # Download the jsonl files listed for the selected config and create one
        # SplitGenerator per available split (train/validation and, where present, test).
        downloaded_file = dl_manager.download_and_extract(_URL[self.config.name])
        splits = _URL[self.config.name].keys()
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_file[i]}) for i in splits]

    def _generate_examples(self, filepath: List[str]):
        # Each file is JSON Lines: one JSON object per non-empty line.
        _key = 0
        for _file in filepath:
            logger.info("generating examples from = %s", _file)
            with open(_file, encoding="utf-8") as f:
                _list = [json.loads(i) for i in f.read().split("\n") if len(i) > 0]
                for i in _list:
                    yield _key, i
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label_name_flatten": datasets.Value("string"),
                    "label": datasets.Sequence(datasets.features.ClassLabel(names=_CLASS_MAPPING["en"])),
                    "label_name": datasets.Sequence(datasets.Value("string"))
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
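

# Example usage (a minimal sketch): this script is meant to be loaded through the
# Hugging Face Hub as cardiffnlp/tweet_topic_multilingual (see _ROOT_URL above);
# depending on the installed `datasets` version, trust_remote_code=True may be needed.
if __name__ == "__main__":
    dataset = datasets.load_dataset("cardiffnlp/tweet_topic_multilingual", "en")
    # Each example carries "id", "text", "label" (a sequence of ClassLabel ids),
    # "label_name" and "label_name_flatten" fields, as declared in _info().
    print(dataset["train"][0])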