|
import textwrap |
|
import datasets |
|
from typing import Dict, List, Optional, Union |
|
|
|
import xml.etree.ElementTree as ET |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
|
|
|
|
|
|
_LENERBR_KWARGS = dict( |
|
name = "LeNER-Br", |
|
description=textwrap.dedent( |
|
"""\ |
|
LeNER-Br is a Portuguese language dataset for named entity recognition applied to legal documents. |
|
LeNER-Br consists entirely of manually annotated legislation and legal cases texts and contains tags |
|
for persons, locations, time entities, organizations, legislation and legal cases. To compose the dataset, |
|
66 legal documents from several Brazilian Courts were collected. Courts of superior and state levels were considered, |
|
such as Supremo Tribunal Federal, Superior Tribunal de Justiça, Tribunal de Justiça de Minas Gerais and Tribunal de Contas da União. |
|
In addition, four legislation documents were collected, such as "Lei Maria da Penha", giving a total of 70 documents.""" |
|
), |
|
task_type="ner", |
|
label_classes=["ORGANIZACAO", "PESSOA", "TEMPO", "LOCAL", "LEGISLACAO", "JURISPRUDENCIA"], |
|
data_urls={ |
|
"train": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/train/train.conll", |
|
"dev": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/dev/dev.conll", |
|
"test": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/test/test.conll", |
|
}, |
|
citation=textwrap.dedent( |
|
"""\ |
|
@InProceedings{luz_etal_propor2018, |
|
author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and |
|
Renato R. R. {de Oliveira} and Matheus Stauffer and |
|
Samuel Couto and Paulo Bermejo}, |
|
title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text}, |
|
booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})}, |
|
publisher = {Springer}, |
|
series = {Lecture Notes on Computer Science ({LNCS})}, |
|
pages = {313--323}, |
|
year = {2018}, |
|
month = {September 24-26}, |
|
address = {Canela, RS, Brazil}, |
|
doi = {10.1007/978-3-319-99722-3_32}, |
|
url = {https://teodecampos.github.io/LeNER-Br/}, |
|
}""" |
|
), |
|
url="https://teodecampos.github.io/LeNER-Br/", |
|
) |
|
|
|
|
|
|
|
|
|
|
|
_ASSIN2_BASE_KWARGS = dict( |
|
description=textwrap.dedent( |
|
"""\ |
|
The ASSIN 2 corpus is composed of rather simple sentences. Following the procedures of SemEval 2014 Task 1. |
|
The training and validation data are composed, respectively, of 6,500 and 500 sentence pairs in Brazilian Portuguese, |
|
annotated for entailment and semantic similarity. Semantic similarity values range from 1 to 5, and text entailment |
|
classes are either entailment or none. The test data are composed of approximately 3,000 sentence pairs with the same |
|
annotation. All data were manually annotated.""" |
|
), |
|
data_urls={ |
|
"train": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-train-only.xml", |
|
"dev": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-dev.xml", |
|
"test": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-test.xml", |
|
}, |
|
citation=textwrap.dedent( |
|
"""\ |
|
@inproceedings{real2020assin, |
|
title={The assin 2 shared task: a quick overview}, |
|
author={Real, Livy and Fonseca, Erick and Oliveira, Hugo Goncalo}, |
|
booktitle={International Conference on Computational Processing of the Portuguese Language}, |
|
pages={406--412}, |
|
year={2020}, |
|
organization={Springer} |
|
}""" |
|
), |
|
url="https://sites.google.com/view/assin2", |
|
) |
|
_ASSIN2_RTE_KWARGS = dict( |
|
name = "assin2-rte", |
|
task_type="rte", |
|
label_classes=["NONE", "ENTAILMENT"], |
|
**_ASSIN2_BASE_KWARGS |
|
) |
|
|
|
|
|
class PTBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for PTBenchmark."""

    def __init__(
        self,
        task_type,
        data_urls,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """BuilderConfig for PTBenchmark.

        Args:
            task_type: `string`, identifier of the task ("ner" or "rte");
                selects the feature schema and the example generator used
                by the builder.
            data_urls: `dict[string, string]`, map from split name ("train",
                "dev", "test") to the URL of that split's raw data file.
            citation: `string`, BibTeX citation for the dataset.
            url: `string`, homepage with information about the dataset.
            label_classes: `list[string]`, the list of classes if the label is
                categorical; the order defines the ClassLabel integer ids.
            process_label: `Function[string, any]`, function taking in the raw
                value of the label and processing it to the form required by
                the label feature.
            **kwargs: keyword arguments forwarded to super (e.g. `name`,
                `description`).
        """
        # Version is pinned here so every benchmark config shares it.
        super(PTBenchmarkConfig, self).__init__(version=datasets.Version("1.0.3", ""), **kwargs)
        self.label_classes = label_classes
        self.task_type = task_type
        self.data_urls = data_urls
        self.citation = citation
        self.url = url
        self.process_label = process_label
|
|
|
def _get_ner_dataset_info(config):
    """Build the DatasetInfo for a token-classification (NER) config.

    Expands ``config.label_classes`` into BIO-scheme tags — "B-X" and "I-X"
    per class, preceded by the outside tag "O".
    """
    bio_labels = ["O"] + [
        f"{prefix}-{label}"
        for label in config.label_classes
        for prefix in ("B", "I")
    ]
    features = datasets.Features(
        {
            "id": datasets.Value("string"),
            "tokens": datasets.Sequence(datasets.Value("string")),
            "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=bio_labels)),
        }
    )
    return datasets.DatasetInfo(
        description=config.description,
        homepage=config.url,
        citation=config.citation,
        features=features,
    )
|
|
|
def _get_rte_dataset_info(config):
    """Build the DatasetInfo for a sentence-pair entailment (RTE) config."""
    features = datasets.Features(
        {
            "id": datasets.Value("int32"),
            "sentence1": datasets.Value("string"),
            "sentence2": datasets.Value("string"),
            "label": datasets.features.ClassLabel(names=config.label_classes),
        }
    )
    return datasets.DatasetInfo(
        description=config.description,
        homepage=config.url,
        citation=config.citation,
        features=features,
    )
|
|
|
def _conll_ner_generator(file_path): |
|
with open(file_path, encoding="utf-8") as f: |
|
|
|
guid = 0 |
|
tokens = [] |
|
ner_tags = [] |
|
|
|
for line in f: |
|
if line == "" or line == "\n": |
|
if tokens: |
|
yield guid, { |
|
"id": str(guid), |
|
"tokens": tokens, |
|
"ner_tags": ner_tags, |
|
} |
|
guid += 1 |
|
tokens = [] |
|
ner_tags = [] |
|
else: |
|
splits = line.split(" ") |
|
tokens.append(splits[0]) |
|
ner_tags.append(splits[1].rstrip()) |
|
|
|
|
|
yield guid, { |
|
"id": str(guid), |
|
"tokens": tokens, |
|
"ner_tags": ner_tags, |
|
} |
|
|
|
def _assin2_rte_generator(file_path): |
|
"""Yields examples.""" |
|
id_ = 0 |
|
|
|
with open(file_path, "rb") as f: |
|
|
|
tree = ET.parse(f) |
|
root = tree.getroot() |
|
|
|
for pair in root: |
|
|
|
yield id_, { |
|
"id": int(pair.attrib.get("id")), |
|
"sentence1": pair.find(".//t").text, |
|
"sentence2": pair.find(".//h").text, |
|
|
|
"label": pair.attrib.get("entailment").upper(), |
|
} |
|
|
|
id_ += 1 |
|
|
|
|
|
class PTBenchmark(datasets.GeneratorBasedBuilder):
    """Builder exposing Portuguese benchmark tasks (LeNER-Br NER, ASSIN2 RTE)."""

    BUILDER_CONFIGS = [
        PTBenchmarkConfig(**_LENERBR_KWARGS),
        PTBenchmarkConfig(**_ASSIN2_RTE_KWARGS),
    ]

    def _info(self) -> datasets.DatasetInfo:
        """Return the DatasetInfo matching the selected config's task type.

        Raises:
            ValueError: if the config declares an unsupported task type.
        """
        if self.config.task_type == "ner":
            return _get_ner_dataset_info(self.config)
        if self.config.task_type == "rte":
            return _get_rte_dataset_info(self.config)
        # Fail loudly instead of silently returning None (previous behavior),
        # which would only surface later as an opaque downstream error.
        raise ValueError(f"Unsupported task type: {self.config.task_type!r}")

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the per-split data files and map them to standard splits."""
        file_paths = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": file_paths["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": file_paths["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": file_paths["test"]},
            ),
        ]

    def _generate_examples(
        self,
        file_path: Optional[str] = None
    ):
        """Dispatch example generation to the task-specific parser.

        Raises:
            ValueError: if no generator is implemented for the config.
        """
        logger.info("⏳ Generating examples from = %s", file_path)
        if self.config.task_type == "ner":
            yield from _conll_ner_generator(file_path)
        elif self.config.task_type == "rte" and "assin2" in self.config.name:
            yield from _assin2_rte_generator(file_path)
        else:
            # Previously this fell through and yielded nothing, producing an
            # empty (and confusing) dataset for unsupported configs.
            raise ValueError(f"No example generator for config {self.config.name!r}")
|
|