""" TweetTopicMultilingual Dataset """
import json
import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[TweetTopicMultilingual](TBA)"""
_VERSION = "0.0.3"
_CITATION = """TBA"""
_HOME_PAGE = "https://cardiffnlp.github.io"
_NAME = "tweet_topic_multilingual"
_ROOT_URL = f'https://huggingface.co/datasets/cardiffnlp/{_NAME}/resolve/main/dataset'
_LANGUAGES = ["en", "es", "ja", "gr"]  # English, Spanish, Japanese, Greek ("gr" is this dataset's own code for Greek)
_URL = {}

# plain train/test/validation splits for each language
for lan in _LANGUAGES:
    _URL[lan] = {split: f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for split in ["train", "test", "validation"]}
# extra English subset (train/validation only)
_URL["en_2022"] = {split: f"{_ROOT_URL}/en_2022/{split}.jsonl" for split in ["train", "validation"]}
# 5-fold cross-validation splits for each language
for lan in _LANGUAGES:
    _URL.update({
        f"{lan}_cross_validation_{n}": {
            split: f"{_ROOT_URL}/{lan}/cross_validation/{lan}_{split}_{n}.jsonl"
            for split in ["train", "test", "validation"]
        } for n in range(5)
    })
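# the resulting config names are the keys of _URL: "en", "es", "ja", "gr",
# "en_2022", and "{lan}_cross_validation_{n}" for n in 0..4
# (e.g. "ja_cross_validation_3")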


class Config(datasets.BuilderConfig):
    """BuilderConfig for TweetTopicMultilingual."""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class TweetTopicMultilingual(datasets.GeneratorBasedBuilder):
    """TweetTopicMultilingual dataset builder."""

    # one BuilderConfig per key of _URL
    BUILDER_CONFIGS = [
        Config(name=name, version=datasets.Version(_VERSION), description=_DESCRIPTION) for name in _URL
    ]

    def _split_generators(self, dl_manager):
        # download each split's JSONL file for the selected config;
        # `downloaded_file` maps split name -> local file path
        downloaded_file = dl_manager.download_and_extract(_URL[self.config.name])
        splits = _URL[self.config.name].keys()
        return [datasets.SplitGenerator(name=split, gen_kwargs={"filepath": downloaded_file[split]}) for split in splits]

    def _generate_examples(self, filepath):
        logger.info("generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            # each non-empty line of the JSONL file holds one JSON-encoded example
            examples = [json.loads(line) for line in f if line.strip()]
        for _key, example in enumerate(examples):
            yield _key, example

    def _info(self):
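        # the 19 topic labels; `label` is a Sequence of ClassLabel, so each
        # tweet can carry several of them (multi-label classification)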
        names = [
            "arts_&_culture", "business_&_entrepreneurs", "celebrity_&_pop_culture", "diaries_&_daily_life", "family",
            "fashion_&_style", "film_tv_&_video", "fitness_&_health", "food_&_dining", "gaming",
            "learning_&_educational", "music", "news_&_social_concern", "other_hobbies", "relationships",
            "science_&_technology", "sports", "travel_&_adventure", "youth_&_student_life"
        ]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    "label": datasets.Sequence(datasets.features.ClassLabel(names=names)),
                    "label_name": datasets.Sequence(datasets.Value("string"))
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
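

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original script: loading a
    # script-based dataset like this one needs the `datasets` library, and
    # recent versions may additionally require `trust_remote_code=True`.
    # The config name must be one of _URL's keys, e.g. "en", "en_2022",
    # or "es_cross_validation_0".
    dataset = datasets.load_dataset(__file__, "en")
    print(dataset)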