eduagarcia committed on
Commit
69440bb
1 Parent(s): 5d3fb60

Add Ulysses-C, brazilian_court_decisions and HAREM

Files changed (1)
  1. portuguese_benchmark.py +165 -19
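For quick reference, the new configurations added below can be loaded by name once the script is published as a Hugging Face dataset repository. A minimal sketch, assuming the script lives in a repo such as eduagarcia/portuguese_benchmark (the repo id is an assumption, and trust_remote_code is only needed on newer `datasets` releases):

import datasets

# Config names come from the *_KWARGS dicts added in this commit.
for config_name in ["UlyssesNER-Br-C", "brazilian_court_decisions_judgment", "harem-default"]:
    ds = datasets.load_dataset(
        "eduagarcia/portuguese_benchmark",  # assumed repo id hosting portuguese_benchmark.py
        config_name,
        trust_remote_code=True,  # required by newer `datasets` versions to run loading scripts
    )
    print(config_name, {split: len(ds[split]) for split in ds})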
portuguese_benchmark.py CHANGED
@@ -1,4 +1,5 @@
import datasets
+ from datasets import ClassLabel
from typing import Dict, List, Optional, Union, Callable
import json
import textwrap
@@ -158,9 +159,6 @@ _ULYSSESNER_META_KWARGS = dict(
from legislation."""
),
task_type="ner",
- label_classes=['DATA', 'EVENTO', 'FUNDapelido', 'FUNDlei', 'FUNDprojetodelei', 'LOCALconcreto', 'LOCALvirtual', \
- 'ORGgovernamental', 'ORGnaogovernamental', 'ORGpartido', 'PESSOAcargo', 'PESSOAgrupocargo', 'PESSOAindividual', \
- 'PRODUTOoutros', 'PRODUTOprograma', 'PRODUTOsistema'],
citation=textwrap.dedent(
"""\
@InProceedings{10.1007/978-3-030-98305-5_1,
@@ -235,21 +233,138 @@ _ULYSSESNER_META_KWARGS = dict(
)
_ULYSSESNER_PL_KWARGS = dict(
name = "UlyssesNER-Br-PL",
+ data_urls = {
+ "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_categorias/train.txt",
+ "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_categorias/valid.txt",
+ "test": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_categorias/test.txt",
+ },
+ label_classes = ['DATA', 'EVENTO', 'FUNDAMENTO', 'LOCAL', 'ORGANIZACAO', 'PESSOA', 'PRODUTODELEI'],
+ **_ULYSSESNER_META_KWARGS
+ )
+ _ULYSSESNER_C_KWARGS = dict(
+ name = "UlyssesNER-Br-C",
+ data_urls = {
+ "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_categorias/train.txt",
+ "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_categorias/valid.txt",
+ "test": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_categorias/test.txt",
+ },
+ label_classes = ['DATA', 'EVENTO', 'FUNDAMENTO', 'LOCAL', 'ORGANIZACAO', 'PESSOA', 'PRODUTODELEI'],
+ **_ULYSSESNER_META_KWARGS
+ )
+
+ _ULYSSESNER_PL_TIPOS_KWARGS = dict(
+ name = "UlyssesNER-Br-PL-tipos",
data_urls = {
"train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_tipos/train.txt",
"validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_tipos/valid.txt",
"test": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_tipos/test.txt",
},
+ label_classes = ['DATA', 'EVENTO', 'FUNDapelido', 'FUNDlei', 'FUNDprojetodelei', 'LOCALconcreto', 'LOCALvirtual', \
+ 'ORGgovernamental', 'ORGnaogovernamental', 'ORGpartido', 'PESSOAcargo', 'PESSOAgrupocargo', 'PESSOAindividual', \
+ 'PRODUTOoutros', 'PRODUTOprograma', 'PRODUTOsistema'],
+ **_ULYSSESNER_META_KWARGS
+ )
+ _ULYSSESNER_C_TIPOS_KWARGS = dict(
+ name = "UlyssesNER-Br-C-tipos",
+ data_urls = {
+ "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_tipos/train.txt",
+ "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_tipos/valid.txt",
+ "test": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_tipos/test.txt",
+ },
+ label_classes = ['DATA', 'EVENTO', 'FUNDapelido', 'FUNDlei', 'FUNDprojetodelei', 'LOCALconcreto', 'LOCALvirtual', \
+ 'ORGgovernamental', 'ORGnaogovernamental', 'ORGpartido', 'PESSOAcargo', 'PESSOAgrupocargo', 'PESSOAgrupoind', \
+ 'PESSOAindividual', 'PRODUTOoutros', 'PRODUTOprograma', 'PRODUTOsistema'],
**_ULYSSESNER_META_KWARGS
)

+ _BRAZILIAN_COURT_DECISIONS_JUDGMENT = dict(
+ name = "brazilian_court_decisions_judgment",
+ task_type = "classification",
+ data_urls = "joelito/brazilian_court_decisions",
+ text_and_label_columns = ["decision_description", "judgment_label"],
+ file_type="hf_dataset",
+ url = "https://github.com/lagefreitas/predicting-brazilian-court-decisions",
+ description =textwrap.dedent(
+ """\
+ The dataset is a collection of 4043 Ementa (summary) court decisions and their metadata from the Tribunal de
+ Justiça de Alagoas (TJAL, the State Supreme Court of Alagoas (Brazil). The court decisions are labeled according
+ to 7 categories and whether the decisions were unanimous on the part of the judges or not. The dataset
+ supports the task of Legal Judgment Prediction."""
+ ),
+ citation = textwrap.dedent(
+ """\
+ @article{Lage-Freitas2022,
+ author = {Lage-Freitas, Andr{\'{e}} and Allende-Cid, H{\'{e}}ctor and Santana, Orivaldo and Oliveira-Lage, L{\'{i}}via},
+ doi = {10.7717/peerj-cs.904},
+ issn = {2376-5992},
+ journal = {PeerJ. Computer science},
+ keywords = {Artificial intelligence,Jurimetrics,Law,Legal,Legal NLP,Legal informatics,Legal outcome forecast,Litigation prediction,Machine learning,NLP,Portuguese,Predictive algorithms,judgement prediction},
+ language = {eng},
+ month = {mar},
+ pages = {e904--e904},
+ publisher = {PeerJ Inc.},
+ title = {{Predicting Brazilian Court Decisions}},
+ url = {https://pubmed.ncbi.nlm.nih.gov/35494851 https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9044329/},
+ volume = {8},
+ year = {2022}
+ }"""
+ ),
+ label_classes = ["no", "partial", "yes"]
+ )
+ _BRAZILIAN_COURT_DECISIONS_UNANIMITY = {
+ **_BRAZILIAN_COURT_DECISIONS_JUDGMENT,
+ "name": "brazilian_court_decisions_unanimity",
+ "text_and_label_columns": ["decision_description", "unanimity_label"],
+ "label_classes": ["unanimity", "not-unanimity"],
+ }
+ HAREM_BASE_KWARGS = dict(
+ description=textwrap.dedent(
+ """\
+ The HAREM is a Portuguese language corpus commonly used for Named Entity Recognition tasks. It includes about 93k words, from 129 different texts,
+ from several genres, and language varieties. The split of this dataset version follows the division made by [1], where 7% HAREM
+ documents are the validation set and the miniHAREM corpus (with about 65k words) is the test set. There are two versions of the dataset set,
+ a version that has a total of 10 different named entity classes (Person, Organization, Location, Value, Date, Title, Thing, Event,
+ Abstraction, and Other) and a "selective" version with only 5 classes (Person, Organization, Location, Value, and Date).
+ It's important to note that the original version of the HAREM dataset has 2 levels of NER details, namely "Category" and "Sub-type".
+ The dataset version processed here ONLY USE the "Category" level of the original dataset.
+ [1] Souza, Fábio, Rodrigo Nogueira, and Roberto Lotufo. "BERTimbau: Pretrained BERT Models for Brazilian Portuguese."
+ Brazilian Conference on Intelligent Systems. Springer, Cham, 2020."""
+ ),
+ task_type="ner",
+ data_urls="harem",
+ file_type="hf_dataset",
+ text_and_label_columns = ["tokens", "ner_tags"],
+ citation=textwrap.dedent(
+ """\
+ @inproceedings{santos2006harem,
+ title={Harem: An advanced ner evaluation contest for portuguese},
+ author={Santos, Diana and Seco, Nuno and Cardoso, Nuno and Vilela, Rui},
+ booktitle={quot; In Nicoletta Calzolari; Khalid Choukri; Aldo Gangemi; Bente Maegaard; Joseph Mariani; Jan Odjik; Daniel Tapias (ed) Proceedings of the 5 th International Conference on Language Resources and Evaluation (LREC'2006)(Genoa Italy 22-28 May 2006)},
+ year={2006}
+ }"""
+ ),
+ url="https://www.linguateca.pt/primeiroHAREM/harem_coleccaodourada_en.html",
+ )
+ HAREM_DEFAULT_KWARGS = dict(
+ name = "harem-default",
+ hf_config_name = "default",
+ label_classes = ["PESSOA", "ORGANIZACAO", "LOCAL", "TEMPO", "VALOR", "ABSTRACCAO", "ACONTECIMENTO", "COISA", "OBRA", "OUTRO"],
+ **HAREM_BASE_KWARGS
+ )
+ HAREM_SELECTIVE_KWARGS = dict(
+ name = "harem-selective",
+ hf_config_name = "selective",
+ label_classes = ["PESSOA", "ORGANIZACAO", "LOCAL", "TEMPO", "VALOR"],
+ **HAREM_BASE_KWARGS
+ )
+
class PTBenchmarkConfig(datasets.BuilderConfig):
"""BuilderConfig for PTBenchmark."""

def __init__(
self,
task_type: str,
- data_urls: Dict[str, str],
+ data_urls: Union[str, Dict[str, str]],
citation: str,
url: str,
label_classes: Optional[List[Union[str, int]]] = None,
@@ -257,6 +372,7 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
text_and_label_columns: Optional[List[str]] = None, #columns for train, dev and test for csv datasets
indexes_url: Optional[str] = None, #indexes for train, dev and test for single file datasets
process_label: Optional[Callable[[str], str]] = lambda x: x,
+ hf_config_name = "default",
**kwargs,
):
"""BuilderConfig for GLUE.
@@ -287,6 +403,7 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
self.text_and_label_columns = text_and_label_columns
self.indexes_url = indexes_url
self.process_label = process_label
+ self.hf_config_name = hf_config_name

def _get_classification_features(config: PTBenchmarkConfig):
return datasets.Features(
@@ -421,20 +538,13 @@ def _assin2_generator(file_path, config: PTBenchmarkConfig):
class PTBenchmark(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
PTBenchmarkConfig(
- **_LENERBR_KWARGS
- ),
- PTBenchmarkConfig(
- **_ASSIN2_RTE_KWARGS
- ),
- PTBenchmarkConfig(
- **_ASSIN2_STS_KWARGS
- ),
- PTBenchmarkConfig(
- **_HATEBR_KWARGS
- ),
- PTBenchmarkConfig(
- **_ULYSSESNER_PL_KWARGS
- )
+ **CONFIG_KWARGS
+ ) \
+ for CONFIG_KWARGS in \
+ [_LENERBR_KWARGS, _ASSIN2_RTE_KWARGS, _ASSIN2_STS_KWARGS, _HATEBR_KWARGS, \
+ _ULYSSESNER_PL_KWARGS, _ULYSSESNER_C_KWARGS, _ULYSSESNER_PL_TIPOS_KWARGS, \
+ _ULYSSESNER_C_TIPOS_KWARGS, _BRAZILIAN_COURT_DECISIONS_JUDGMENT, _BRAZILIAN_COURT_DECISIONS_UNANIMITY, \
+ HAREM_DEFAULT_KWARGS, HAREM_SELECTIVE_KWARGS]
]

def _info(self) -> datasets.DatasetInfo:
@@ -457,6 +567,14 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
)

def _split_generators(self, dl_manager: datasets.DownloadManager):
+ if self.config.file_type == 'hf_dataset':
+ return [
+ datasets.SplitGenerator(
+ name=split,
+ gen_kwargs={"split": split}, # These kwargs will be passed to _generate_examples
+ )
+ for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]
+ ]
data_urls = self.config.data_urls.copy()
if self.config.indexes_url is not None:
data_urls['indexes'] = self.config.indexes_url
@@ -500,6 +618,10 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
split: Optional[str] = None
):
logger.info("⏳ Generating examples from = %s", file_path)
+ if self.config.file_type == "hf_dataset":
+ dataset = datasets.load_dataset(self.config.data_urls, self.config.hf_config_name, split=split)
+ text_col, label_col = self.config.text_and_label_columns
+
if self.config.task_type == "classification":
if self.config.file_type == "csv":
yield from _csv_generator(
@@ -508,8 +630,32 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
indexes_path=indexes_path,
split=split
)
+ elif self.config.file_type == "hf_dataset":
+ for id, item in enumerate(dataset):
+ label = item[label_col]
+ if label not in self.config.label_classes:
+ continue # filter out invalid classes to construct ClassLabel
+ if isinstance(dataset.features[label_col], ClassLabel):
+ label = dataset.features[label_col].int2str(label)
+ yield id, {
+ "idx": id,
+ "sentence": item[text_col],
+ "label": self.config.process_label(label),
+ }
elif self.config.task_type == "ner":
- yield from _conll_ner_generator(file_path, self.config)
+ if self.config.file_type == "hf_dataset":
+ for id, item in enumerate(dataset):
+ tags = item[label_col]
+ if isinstance(dataset.features[label_col], ClassLabel):
+ for i in range(len(tags)):
+ tags[i] = self.config.process_label(dataset.features[label_col].int2str(tags[i]))
+ yield id, {
+ "idx": id,
+ "tokens": item[text_col],
+ "ner_tags": tags,
+ }
+ else:
+ yield from _conll_ner_generator(file_path, self.config)
elif self.config.task_type == "rte":
if "assin2" in self.config.name:
yield from _assin2_generator(file_path, self.config)
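As a usage note, the new hf_dataset branch in _generate_examples delegates to datasets.load_dataset and remaps columns. A standalone sketch of that behavior for the brazilian_court_decisions_judgment config (column and label names come from the config above; loading joelito/brazilian_court_decisions without an explicit config name is an assumption):

import datasets
from datasets import ClassLabel

source = datasets.load_dataset("joelito/brazilian_court_decisions", split="train")
text_col, label_col = "decision_description", "judgment_label"
label_classes = ["no", "partial", "yes"]

for idx, item in enumerate(source):
    label = item[label_col]
    # Same order as the committed code: judgment_label is stored as a string in this
    # dataset, so the membership check runs on strings before any ClassLabel decoding.
    if label not in label_classes:
        continue  # skip rows whose label is outside the declared class set
    if isinstance(source.features[label_col], ClassLabel):
        label = source.features[label_col].int2str(label)
    if idx < 3:
        print({"idx": idx, "sentence": item[text_col], "label": label})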