# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bernice pretrain data""" | |
import csv | |
import json | |
import os | |
import gzip | |
import datasets | |
_CITATION = """\
Alexandra DeLucia, Shijie Wu, Aaron Mueller, Carlos Aguirre, Philip Resnik, and Mark Dredze. 2022.
Bernice: A Multilingual Pre-trained Encoder for Twitter. In Proceedings of the 2022 Conference on
Empirical Methods in Natural Language Processing, pages 6191–6205, Abu Dhabi, United Arab Emirates.
Association for Computational Linguistics.
"""

_DESCRIPTION = """\
Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder.
The tweets come from the public 1% Twitter API stream from January 2016 to December 2021.
Each tweet ID is paired with its Twitter-provided language label; the data covers 66 unique
languages, identified by ISO 639 codes, including `und` for undefined languages.
Tweet text must be re-gathered via the Twitter API.
"""
_HOMEPAGE = "https://aclanthology.org/2022.emnlp-main.415"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# Only train data; a validation split is not provided.
_BASE_DATA_URL = "https://huggingface.co/datasets/jhu-clsp/bernice-pretrain-data/resolve/main/data"
_URLS = {
    "all": ['2016_01.txt.gz', '2016_02.txt.gz', '2016_03.txt.gz', '2016_04.txt.gz', '2016_05.txt.gz', '2016_06.txt.gz',
            '2016_07.txt.gz', '2016_08.txt.gz', '2016_09.txt.gz', '2016_10.txt.gz', '2016_11.txt.gz', '2016_12.txt.gz',
            '2017_01.txt.gz', '2017_02.txt.gz', '2017_03.txt.gz', '2017_04.txt.gz', '2017_05.txt.gz', '2017_06.txt.gz',
            '2017_07.txt.gz', '2017_09.txt.gz', '2017_10.txt.gz', '2017_11.txt.gz', '2017_12.txt.gz', '2018_01.txt.gz',
            '2018_02.txt.gz', '2018_03.txt.gz', '2018_04.txt.gz', '2018_05.txt.gz', '2018_06.txt.gz', '2018_07.txt.gz',
            '2018_08.txt.gz', '2018_09.txt.gz', '2018_10.txt.gz', '2018_11.txt.gz', '2018_12.txt.gz', '2019_01.txt.gz',
            '2019_02.txt.gz', '2019_03.txt.gz', '2019_04.txt.gz', '2019_05.txt.gz', '2019_06.txt.gz', '2019_07.txt.gz',
            '2019_08.txt.gz', '2019_09.txt.gz', '2019_10.txt.gz', '2019_11.txt.gz', '2019_12.txt.gz', '2020_01.txt.gz',
            '2020_02.txt.gz', '2020_03.txt.gz', '2020_04.txt.gz', '2020_05.txt.gz', '2020_06.txt.gz', '2020_07.txt.gz',
            '2020_08.txt.gz', '2020_09.txt.gz', '2020_10.txt.gz', '2020_11.txt.gz', '2020_12.txt.gz', '2021_01.txt.gz',
            '2021_02.txt.gz', '2021_03.txt.gz', '2021_04.txt.gz', '2021_05.txt.gz', '2021_06.txt.gz', '2021_07.txt.gz',
            '2021_08.txt.gz', '2021_09.txt.gz', '2021_10.txt.gz', '2021_11.txt.gz', '2021_12.txt.gz'],
    "indic": ["indic_tweet_ids.txt.gz"]
}
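
# Each gzipped file above contains one tab-separated record per line
# (tweet ID, Twitter-provided language code, year), which `_generate_examples`
# splits on "\t". An illustrative (made-up) line:
#
#     "1234567890123456789\ten\t2016"
#
# Note that no file is listed for 2017_08.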


class BernicePretrainData(datasets.GeneratorBasedBuilder):
    """Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder."""

    VERSION = datasets.Version("1.0.0")

    # Two configurations: "all" (every language) and "indic" (Indic languages only).
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="all", version=VERSION,
                               description="Includes all tweets"),
        datasets.BuilderConfig(name="indic", version=VERSION,
                               description="Only the Indic languages, plus tweets labeled `und` (undefined)"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            # Description shown on the dataset page
            description=_DESCRIPTION,
            # Each example is a tweet ID together with its language label and the year it was
            # posted; the features are the same for both configurations.
            features=datasets.Features(
                {
                    "tweet_id": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "year": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the tweet ID files for the selected configuration (self.config.name is
        # "all" or "indic"); `download_and_extract` returns local paths to the decompressed files.
        urls_to_download = [f"{_BASE_DATA_URL}/{self.config.name}/{f}" for f in _URLS[self.config.name]]
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to `_generate_examples`
                gen_kwargs={
                    "filepaths": downloaded_files,
                    "split": "train",
                },
            )
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepaths, split):
        # Each extracted file holds one tab-separated record per line: tweet ID, language code, year.
        # The tweet ID also serves as the example key, which must be unique across the dataset.
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for instance in f:
                    tweet_id, lang, year = instance.strip().split("\t")
                    yield tweet_id, {
                        "tweet_id": tweet_id,
                        "lang": lang,
                        "year": year,
                    }
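

# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the loader): re-hydrating tweet IDs.
# As the description notes, tweet text must be re-gathered via the Twitter API.
# The endpoint, access tier, and response format below reflect the v2 tweet-lookup
# API at the time of writing and may have changed; treat this as an assumption-laden
# example rather than a supported utility.
def _hydrate_tweets(tweet_ids, bearer_token):
    """Fetch tweet objects for up to 100 tweet IDs in a single lookup request."""
    import requests  # local import so the dataset loader itself gains no extra dependency

    response = requests.get(
        "https://api.twitter.com/2/tweets",
        params={"ids": ",".join(tweet_ids[:100]), "tweet.fields": "lang,created_at"},
        headers={"Authorization": f"Bearer {bearer_token}"},
    )
    response.raise_for_status()
    return response.json().get("data", [])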