eduagarcia committed
Commit
1de8de7
1 Parent(s): 1e95b76

Add LeNER-Br dataset

Files changed (1)
  1. portuguese_benchmark.py +171 -0
portuguese_benchmark.py ADDED
@@ -0,0 +1,171 @@
import textwrap
import datasets
from typing import Dict, List, Optional, Union

logger = datasets.logging.get_logger(__name__)


_LENERBR_KWARGS = dict(
    name="LeNER-Br",
    description=textwrap.dedent(
        """\
        LeNER-Br is a Portuguese language dataset for named entity recognition applied to legal documents.
        LeNER-Br consists entirely of manually annotated legislation and legal cases texts and contains tags
        for persons, locations, time entities, organizations, legislation and legal cases. To compose the dataset,
        66 legal documents from several Brazilian Courts were collected. Courts of superior and state levels were considered,
        such as Supremo Tribunal Federal, Superior Tribunal de Justiça, Tribunal de Justiça de Minas Gerais and Tribunal de Contas da União.
        In addition, four legislation documents were collected, such as "Lei Maria da Penha", giving a total of 70 documents."""
    ),
    task_type="ner",
    label_classes=["ORGANIZACAO", "PESSOA", "TEMPO", "LOCAL", "LEGISLACAO", "JURISPRUDENCIA"],
    data_urls={
        "train": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/train/train.conll",
        "dev": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/dev/dev.conll",
        "test": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/test/test.conll",
    },
    citation=textwrap.dedent(
        """\
        @InProceedings{luz_etal_propor2018,
            author = {Pedro H. {Luz de Araujo} and Te\'{o}filo E. {de Campos} and
                      Renato R. R. {de Oliveira} and Matheus Stauffer and
                      Samuel Couto and Paulo Bermejo},
            title = {{LeNER-Br}: a Dataset for Named Entity Recognition in {Brazilian} Legal Text},
            booktitle = {International Conference on the Computational Processing of Portuguese ({PROPOR})},
            publisher = {Springer},
            series = {Lecture Notes on Computer Science ({LNCS})},
            pages = {313--323},
            year = {2018},
            month = {September 24-26},
            address = {Canela, RS, Brazil},
            doi = {10.1007/978-3-319-99722-3_32},
            url = {https://teodecampos.github.io/LeNER-Br/},
        }"""
    ),
    url="https://teodecampos.github.io/LeNER-Br/",
)

class PTBenchmarkConfig(datasets.BuilderConfig):
    """BuilderConfig for PTBenchmark."""

    def __init__(
        self,
        task_type,
        data_urls,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """BuilderConfig for PTBenchmark.

        Args:
            task_type: `string`, the kind of task the config describes (e.g. "ner").
            data_urls: `dict[string, string]`, map from split name ("train", "dev",
                "test") to the URL of the raw data file for that split.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            label_classes: `list[string]`, the list of classes if the label is
                categorical. If not provided, then the label will be of type
                `datasets.Value('float32')`.
            process_label: `Function[string, any]`, function taking in the raw value
                of the label and processing it to the form required by the label feature.
            **kwargs: keyword arguments forwarded to super.
        """
        super(PTBenchmarkConfig, self).__init__(version=datasets.Version("1.0.3", ""), **kwargs)
        self.label_classes = label_classes
        self.task_type = task_type
        self.data_urls = data_urls
        self.citation = citation
        self.url = url
        self.process_label = process_label

def _get_ner_dataset_info(config):
    bio_labels = ["O"]
    for label_name in config.label_classes:
        bio_labels.append("B-" + label_name)
        bio_labels.append("I-" + label_name)
    return datasets.DatasetInfo(
        description=config.description,
        homepage=config.url,
        citation=config.citation,
        features=datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(names=bio_labels)
                ),
            }
        ),
    )

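# For the LeNER-Br config above, the six label_classes expand into the 13-tag BIO
# label set used by the "ner_tags" ClassLabel feature:
#   ["O",
#    "B-ORGANIZACAO", "I-ORGANIZACAO",
#    "B-PESSOA", "I-PESSOA",
#    "B-TEMPO", "I-TEMPO",
#    "B-LOCAL", "I-LOCAL",
#    "B-LEGISLACAO", "I-LEGISLACAO",
#    "B-JURISPRUDENCIA", "I-JURISPRUDENCIA"]
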
def _conll_ner_generator(file_path):
    with open(file_path, encoding="utf-8") as f:
        guid = 0
        tokens = []
        ner_tags = []

        for line in f:
            if line == "" or line == "\n":
                # Blank line: the current sentence is complete.
                if tokens:
                    yield guid, {
                        "id": str(guid),
                        "tokens": tokens,
                        "ner_tags": ner_tags,
                    }
                    guid += 1
                    tokens = []
                    ner_tags = []
            else:
                # Each non-blank line holds "TOKEN TAG" separated by a single space.
                splits = line.split(" ")
                tokens.append(splits[0])
                ner_tags.append(splits[1].rstrip())

        # Last example, in case the file does not end with a blank line.
        if tokens:
            yield guid, {
                "id": str(guid),
                "tokens": tokens,
                "ner_tags": ner_tags,
            }

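# _conll_ner_generator expects plain CoNLL-style text: one "TOKEN TAG" pair per line,
# separated by a single space, with a blank line between sentences. Schematically
# (the tokens and tags below are illustrative, not copied from the real LeNER-Br files):
#
#   Supremo B-ORGANIZACAO
#   Tribunal I-ORGANIZACAO
#   Federal I-ORGANIZACAO
#   negou O
#   provimento O
#   . O
#
#   Lei B-LEGISLACAO
#   8.666/93 I-LEGISLACAO
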
class PTBenchmark(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        PTBenchmarkConfig(
            **_LENERBR_KWARGS
        )
    ]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.task_type == "ner":
            return _get_ner_dataset_info(self.config)

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        file_paths = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": file_paths["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": file_paths["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": file_paths["test"]},
            ),
        ]

    def _generate_examples(
        self,
        file_path: Optional[str] = None
    ):
        logger.info("⏳ Generating examples from = %s", file_path)
        if self.config.task_type == "ner":
            yield from _conll_ner_generator(file_path)
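
A minimal usage sketch, assuming the script above is saved locally as portuguese_benchmark.py and a datasets version that still supports dataset loading scripts (the target Hub repository id is not stated in this commit, so a local script path is used):

    import datasets

    # Load the LeNER-Br config defined by the loading script above.
    lener = datasets.load_dataset("portuguese_benchmark.py", "LeNER-Br")

    print(lener)                                              # train/validation/test splits
    print(lener["train"][0]["tokens"][:8])                    # first tokens of the first sentence
    print(lener["train"].features["ner_tags"].feature.names)  # the BIO label names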