|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Arabic Poetry Metric dataset.""" |
|
|
|
|
|
import os |
|
import datasets |
|
import pandas as pd |
|
|
|
_DESCRIPTION = """\ |
|
Masader is the largest public catalogue for Arabic NLP datasets, which consists of more than 200 datasets annotated with 25 attributes. |
|
""" |
|
|
|
_CITATION = """\ |
|
@misc{alyafeai2021masader, |
|
title={Masader: Metadata Sourcing for Arabic Text and Speech Data Resources}, |
|
author={Zaid Alyafeai and Maraim Masoud and Mustafa Ghaleb and Maged S. Al-shaibani}, |
|
year={2021}, |
|
eprint={2110.06744}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CL} |
|
} |
|
""" |
|
|
|
columns = ['No.', 'Name', 'Subsets', 'Link', 'License', 'Year', 'Language', |
|
'Dialect', 'Domain', 'Form', 'Collection Style', 'Description', |
|
'Volume', 'Unit', 'Ethical Risks', 'Provider', 'Derived From', |
|
'Paper Title', 'Paper Link', 'Script', 'Tokenized', 'Host', 'Access', |
|
'Cost', 'Test Split'] |
|
|
|
class MasaderConfig(datasets.BuilderConfig):
    """BuilderConfig for Masader."""

    def __init__(self, **kwargs):
        """BuilderConfig for Masader.

        Fixed: the original docstring said "MetRec", a copy-paste
        left-over from another loading script.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        # Pin the dataset version; all other settings come from the caller.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
|
|
|
|
|
class Masader(datasets.GeneratorBasedBuilder):
    """Masader dataset: metadata catalogue for Arabic NLP resources."""

    BUILDER_CONFIGS = [
        MasaderConfig(
            name="plain_text",
            description="Plain text",
        )
    ]

    def _info(self):
        """Return dataset metadata; every catalogue attribute is a feature.

        NOTE(review): every feature is declared as a string, but
        _generate_examples yields a dict for 'Subsets' — confirm how the
        datasets library casts this before relying on the schema.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {col: datasets.Value("string") for col in columns}
            ),
            supervised_keys=None,
            homepage="https://github.com/arbml/Masader",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Expose a single TRAIN split backed by the public Google Sheet.

        The gviz endpoint exports the named sheet as CSV, which
        _generate_examples reads directly with pandas.
        """
        sheet_id = "1YO-Vl4DO-lnp8sQpFlcX1cDtzxFoVkCmU1PVw_ZHJDg"
        sheet_name = "filtered_clean"
        url = f"https://docs.google.com/spreadsheets/d/{sheet_id}/gviz/tq?tqx=out:csv&sheet={sheet_name}"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"url": url}
            ),
        ]

    def _generate_examples(self, url):
        """Yield (key, example) pairs parsed from the catalogue CSV.

        A row whose first column is non-empty starts a new catalogue
        entry; the rows that follow with an empty (NaN) first column
        describe that entry's subsets and are folded into its 'Subsets'
        dict.

        Bug fix: the original implementation iterated with
        ``while i < len(df.values) - 1`` and only emitted an entry when
        the *next* row started a new one, so the final entry in the
        sheet was silently dropped. This version flushes the pending
        entry at end of input as well.

        Args:
          url: path/URL of the CSV export (anything pandas.read_csv accepts).

        Yields:
          Tuples of (1-based integer key, dict of catalogue attributes).
        """
        df = pd.read_csv(url, usecols=range(34))

        def _make_entry(row, subsets):
            # Map catalogue column names onto the row, replacing the raw
            # 'Subsets' cell (index 2) with the accumulated subset dict.
            entry = {col: row[j] for j, col in enumerate(columns) if j != 2}
            entry['Subsets'] = subsets
            return entry

        current = None  # row of the main entry currently being accumulated
        subsets = {}    # subset metadata collected for `current`
        idx = 0         # 1-based example key, as in the original

        for row in df.values:
            if str(row[0]) != "nan":
                # New main entry: flush the previous one first.
                if current is not None:
                    idx += 1
                    yield idx, _make_entry(current, subsets)
                    subsets = {}
                current = row
            else:
                # Continuation row: column 2 holds the subset name.
                subsets[row[2]] = {'Dialect': row[7], 'Volume': row[12], 'Unit': row[13]}

        # Emit the last accumulated entry (dropped by the original code).
        if current is not None:
            idx += 1
            yield idx, _make_entry(current, subsets)
|
|