tweet_topic_multilingual / tweet_topic_multilingual.py
""" TweetTopicMultilingual Dataset """
import json
import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """[TweetTopicMultilingual](TBA)"""
_VERSION = "0.0.3"
_CITATION = """TBA"""
_HOME_PAGE = "https://cardiffnlp.github.io"
_NAME = "tweet_topic_multilingual"
_ROOT_URL = f"https://huggingface.co/datasets/cardiffnlp/{_NAME}/resolve/main/dataset"
_LANGUAGES = ["en", "es", "ja", "gr"]
_CLASS_MAPPING = {
"en": [
"Arts & Culture",
"Business & Entrepreneurs",
"Celebrity & Pop Culture",
"Diaries & Daily Life",
"Family",
"Fashion & Style",
"Film, TV & Video",
"Fitness & Health",
"Food & Dining",
"Learning & Educational",
"News & Social Concern",
"Relationships",
"Science & Technology",
"Youth & Student Life",
"Music",
"Gaming",
"Sports",
"Travel & Adventure",
"Other Hobbies"
],
"gr": [
"Τέχνες & Πολιτισμός",
"Επιχειρήσεις & Επιχειρηματίες",
"Διασημότητες & Ποπ κουλτούρα",
"Ημερολόγια & Καθημερινή ζωή",
"Οικογένεια",
"Μόδα & Στυλ",
"Ταινίες, τηλεόραση & βίντεο",
"Γυμναστική & Υεία",
"Φαγητό & Δείπνο",
"Μάθηση & Εκπαίδευση",
"Ειδήσεις & Κοινωνία",
"Σχέσεις",
"Επιστήμη & Τεχνολογία",
"Νεανική & Φοιτητική ζωή",
"Μουσική",
"Παιχνίδια",
"Αθλητισμός",
"Ταξίδια & Περιπέτεια",
"Άλλα χόμπι"
],
"es": [
"Arte y cultura",
"Negocios y emprendedores",
"Celebridades y cultura pop",
"Diarios y vida diaria",
"Familia",
"Moda y estilo",
"Cine, televisión y video",
"Estado físico y salud",
"Comida y comedor",
"Aprendizaje y educación",
"Noticias e interés social",
"Relaciones",
"Ciencia y Tecnología",
"Juventud y Vida Estudiantil",
"Música",
"Juegos",
"Deportes",
"Viajes y aventuras",
"Otros pasatiempos"
],
"ja": [
"アート&カルチャー",
"ビジネス",
"芸能",
"日常",
"家族",
"ファッション",
"映画&ラジオ",
"フィットネス&健康",
"料理",
"教育関連",
"社会",
"人間関係",
"サイエンス",
"学校",
"音楽",
"ゲーム",
"スポーツ",
"旅行",
"その他"
]
}
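
# NOTE: the four lists above are parallel translations of one 19-topic taxonomy.
# ClassLabel indices are defined against the English list in _info() below;
# "gr" is this dataset's own code for Greek (ISO 639-1 would be "el").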
_URL = {}
# plain split
for lan in _LANGUAGES:
_URL[lan] = {split: f"{_ROOT_URL}/{lan}/{lan}_{split}.jsonl" for split in ["train", "test", "validation"]}
_URL["en_2022"] = {split: f"{_ROOT_URL}/en_2022/{split}.jsonl" for split in ["train", "validation"]}
# cross validation
for lan in _LANGUAGES:
_URL.update({
f"{lan}_cross_validation_{n}": {
split: f"{_ROOT_URL}/{lan}/cross_validation/{lan}_{split}_{n}.jsonl"
for split in ["train", "test", "validation"]
} for n in range(5)
})
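
# Each key of _URL doubles as a builder config name below, e.g.:
#   _URL["en"]["train"]                   -> {_ROOT_URL}/en/en_train.jsonl
#   _URL["ja_cross_validation_0"]["test"] -> {_ROOT_URL}/ja/cross_validation/ja_test_0.jsonl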


class Config(datasets.BuilderConfig):
    """BuilderConfig for TweetTopicMultilingual."""

def __init__(self, **kwargs):
"""BuilderConfig.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)


class TweetTopicMultilingual(datasets.GeneratorBasedBuilder):
"""Dataset."""
BUILDER_CONFIGS = [
Config(name=i, version=datasets.Version(_VERSION), description=_DESCRIPTION) for i in _URL.keys()
]

    def _split_generators(self, dl_manager):
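        # download_and_extract mirrors the structure of its argument: the dict
        # of URLs for this config comes back as a dict with the same split
        # keys, now mapped to local file paths.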
downloaded_file = dl_manager.download_and_extract(_URL[self.config.name])
splits = _URL[self.config.name].keys()
return [datasets.SplitGenerator(name=i, gen_kwargs={"filepath": downloaded_file[i]}) for i in splits]

    def _generate_examples(self, filepath):
_key = 0
logger.info("generating examples from = %s", filepath)
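        # Each JSONL line holds one example matching the features declared in
        # _info() below; an illustrative, made-up row:
        #   {"id": "...", "text": "...", "label": [10, 12],
        #    "label_name": ["News & Social Concern", "Science & Technology"],
        #    "label_name_flatten": "News & Social Concern,Science & Technology"}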
        with open(filepath, encoding="utf-8") as f:
            # one JSON object per line (JSONL); skip blank lines
            for line in f:
                if line.strip():
                    yield _key, json.loads(line)
                    _key += 1

    def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"text": datasets.Value("string"),
"label_name_flatten": datasets.Value("string"),
"label": datasets.Sequence(datasets.features.ClassLabel(names=_CLASS_MAPPING["en"])),
"label_name": datasets.Sequence(datasets.Value("string"))
}
),
supervised_keys=None,
homepage=_HOME_PAGE,
citation=_CITATION,
)
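

if __name__ == "__main__":
    # Minimal usage sketch (assumes this script is hosted on the Hub as
    # cardiffnlp/tweet_topic_multilingual, the repo _ROOT_URL points at);
    # any key of _URL above is a valid config name.
    from datasets import load_dataset

    dataset = load_dataset("cardiffnlp/tweet_topic_multilingual", "en")
    print(dataset["train"][0])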