eduagarcia committed
Commit 640c6d6 · 1 Parent(s): 69440bb

PT subsets of MAPA and MULTIEURLEX

Files changed (1):
portuguese_benchmark.py (+169 -40)
portuguese_benchmark.py CHANGED
@@ -232,7 +232,7 @@ _ULYSSESNER_META_KWARGS = dict(
     url="https://github.com/ulysses-camara/ulysses-ner-br",
 )
 _ULYSSESNER_PL_KWARGS = dict(
-    name = "UlyssesNER-Br-PL",
+    name = "UlyssesNER-Br-PL-coarse",
     data_urls = {
         "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_categorias/train.txt",
         "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_categorias/valid.txt",
@@ -242,7 +242,7 @@ _ULYSSESNER_PL_KWARGS = dict(
     **_ULYSSESNER_META_KWARGS
 )
 _ULYSSESNER_C_KWARGS = dict(
-    name = "UlyssesNER-Br-C",
+    name = "UlyssesNER-Br-C-coarse",
     data_urls = {
         "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_categorias/train.txt",
         "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_categorias/valid.txt",
@@ -253,7 +253,7 @@ _ULYSSESNER_C_KWARGS = dict(
 )
 
 _ULYSSESNER_PL_TIPOS_KWARGS = dict(
-    name = "UlyssesNER-Br-PL-tipos",
+    name = "UlyssesNER-Br-PL-fine",
     data_urls = {
         "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_tipos/train.txt",
         "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/PL_corpus_conll/pl_corpus_tipos/valid.txt",
@@ -265,7 +265,7 @@ _ULYSSESNER_PL_TIPOS_KWARGS = dict(
     **_ULYSSESNER_META_KWARGS
 )
 _ULYSSESNER_C_TIPOS_KWARGS = dict(
-    name = "UlyssesNER-Br-C-tipos",
+    name = "UlyssesNER-Br-C-fine",
     data_urls = {
         "train": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_tipos/train.txt",
         "validation": "https://github.com/ulysses-camara/ulysses-ner-br/raw/main/annotated-corpora/C_corpus_conll/c_corpus_tipos/valid.txt",
@@ -347,17 +347,102 @@ HAREM_BASE_KWARGS = dict(
 )
 HAREM_DEFAULT_KWARGS = dict(
     name = "harem-default",
-    hf_config_name = "default",
+    extra_configs = {"name": "default"},
     label_classes = ["PESSOA", "ORGANIZACAO", "LOCAL", "TEMPO", "VALOR", "ABSTRACCAO", "ACONTECIMENTO", "COISA", "OBRA", "OUTRO"],
     **HAREM_BASE_KWARGS
 )
 HAREM_SELECTIVE_KWARGS = dict(
     name = "harem-selective",
-    hf_config_name = "selective",
+    extra_configs = {"name": "selective"},
     label_classes = ["PESSOA", "ORGANIZACAO", "LOCAL", "TEMPO", "VALOR"],
     **HAREM_BASE_KWARGS
 )
 
+_MAPA_BASE_KWARGS = dict(
+    task_type = "ner",
+    data_urls = "joelito/mapa",
+    file_type = "hf_dataset",
+    url = "",
+    description = textwrap.dedent(
+        """\
+        The dataset consists of 12 documents (9 for Spanish due to parsing errors) taken from EUR-Lex,
+        a multilingual corpus of court decisions and legal dispositions in the 24 official languages
+        of the European Union. The documents have been annotated for named entities following the
+        guidelines of the MAPA project which foresees two annotation level, a general and a more
+        fine-grained one. The annotated corpus can be used for named entity recognition/classification."""
+    ),
+    citation = textwrap.dedent(
+        """\
+        @article{DeGibertBonet2022,
+            author = {{de Gibert Bonet}, Ona and {Garc{\'{i}}a Pablos}, Aitor and Cuadros, Montse and Melero, Maite},
+            journal = {Proceedings of the Language Resources and Evaluation Conference},
+            number = {June},
+            pages = {3751--3760},
+            title = {{Spanish Datasets for Sensitive Entity Detection in the Legal Domain}},
+            url = {https://aclanthology.org/2022.lrec-1.400},
+            year = {2022}
+        }"""
+    )
+)
+_MAPA_BASE_KWARGS['filter'] = lambda item: item["language"] == "pt"
+_MAPA_COARSE_KWARGS = dict(
+    name = "mapa_pt_coarse",
+    text_and_label_columns = ["tokens", "coarse_grained"],
+    label_classes = ['ADDRESS', 'AMOUNT', 'DATE', 'ORGANISATION', 'PERSON', 'TIME'],
+    **_MAPA_BASE_KWARGS
+)
+
+_MAPA_FINE_KWARGS = dict(
+    name = "mapa_pt_fine",
+    text_and_label_columns = ["tokens", "fine_grained"],
+    label_classes = ['AGE', 'BUILDING', 'CITY', 'COUNTRY', 'DAY', 'ETHNIC CATEGORY',
+                     'FAMILY NAME', 'INITIAL NAME', 'MARITAL STATUS', 'MONTH', 'NATIONALITY',
+                     'PLACE', 'PROFESSION', 'ROLE', 'STANDARD ABBREVIATION', 'TERRITORY',
+                     'TITLE', 'TYPE', 'UNIT', 'URL', 'VALUE', 'YEAR'],
+    **_MAPA_BASE_KWARGS
+)
+
+
+_MULTIEURLEX_BASE_KWARGS = dict(
+    name = "multi_eurlex_pt",
+    task_type = "multilabel_classification",
+    data_urls = "multi_eurlex",
+    file_type = "hf_dataset",
+    extra_configs = {"language": "pt", "label_level": "level_1"},
+    text_and_label_columns = ["text", "labels"],
+    url = "https://github.com/nlpaueb/MultiEURLEX/",
+    description = textwrap.dedent(
+        """\
+        MultiEURLEX comprises 65k EU laws in 23 official EU languages.
+        Each EU law has been annotated with EUROVOC concepts (labels) by the Publication Office of EU.
+        Each EUROVOC label ID is associated with a label descriptor, e.g., [60, agri-foodstuffs],
+        [6006, plant product], [1115, fruit]. The descriptors are also available in the 23 languages.
+        Chalkidis et al. (2019) published a monolingual (English) version of this dataset, called EUR-LEX,
+        comprising 57k EU laws with the originally assigned gold labels."""
+    ),
+    citation = textwrap.dedent(
+        """\
+        @InProceedings{chalkidis-etal-2021-multieurlex,
+            author = {Chalkidis, Ilias
+                      and Fergadiotis, Manos
+                      and Androutsopoulos, Ion},
+            title = {MultiEURLEX -- A multi-lingual and multi-label legal document
+                     classification dataset for zero-shot cross-lingual transfer},
+            booktitle = {Proceedings of the 2021 Conference on Empirical Methods
+                         in Natural Language Processing},
+            year = {2021},
+            publisher = {Association for Computational Linguistics},
+            location = {Punta Cana, Dominican Republic},
+            url = {https://arxiv.org/abs/2109.00904}
+        }"""
+    ),
+    label_classes = [
+        "100149", "100160", "100148", "100147", "100152", "100143", "100156",
+        "100158", "100154", "100153", "100142", "100145", "100150", "100162",
+        "100159", "100144", "100151", "100157", "100161", "100146", "100155"
+    ]
+)
+
 class PTBenchmarkConfig(datasets.BuilderConfig):
     """BuilderConfig for PTBenchmark."""
 
@@ -371,8 +456,9 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
         file_type: Optional[str] = None, #filetype (csv, tsc, jsonl)
         text_and_label_columns: Optional[List[str]] = None, #columns for train, dev and test for csv datasets
         indexes_url: Optional[str] = None, #indexes for train, dev and test for single file datasets
-        process_label: Optional[Callable[[str], str]] = lambda x: x,
-        hf_config_name = "default",
+        process_label: Callable[[str], str] = lambda x: x,
+        filter: Callable = lambda x: True,
+        extra_configs: Dict = {},
         **kwargs,
     ):
         """BuilderConfig for GLUE.
@@ -403,7 +489,8 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
         self.text_and_label_columns = text_and_label_columns
         self.indexes_url = indexes_url
         self.process_label = process_label
-        self.hf_config_name = hf_config_name
+        self.filter = filter
+        self.extra_configs = extra_configs
 
 def _get_classification_features(config: PTBenchmarkConfig):
     return datasets.Features(
@@ -414,6 +501,17 @@ def _get_classification_features(config: PTBenchmarkConfig):
         }
     )
 
+def _get_multilabel_classification_features(config: PTBenchmarkConfig):
+    return datasets.Features(
+        {
+            "idx": datasets.Value("int32"),
+            "sentence": datasets.Value("string"),
+            "labels": datasets.Sequence(
+                datasets.features.ClassLabel(names=config.label_classes)
+            ),
+        }
+    )
+
 def _get_ner_features(config: PTBenchmarkConfig):
     bio_labels = ["O"]
     for label_name in config.label_classes:
@@ -534,6 +632,56 @@ def _assin2_generator(file_path, config: PTBenchmarkConfig):
         yield id_, example
         id_ += 1
 
+def _hf_dataset_generator(split, config: PTBenchmarkConfig):
+    dataset = datasets.load_dataset(config.data_urls, split=split, **config.extra_configs)
+    feature_col, label_col = config.text_and_label_columns
+
+    target_feature_col, target_label_col = feature_col, label_col
+    if config.task_type == "classification":
+        target_feature_col, target_label_col = "sentence", "label"
+    elif config.task_type == "multilabel_classification":
+        target_feature_col, target_label_col = "sentence", "labels"
+    elif config.task_type == "ner":
+        target_feature_col, target_label_col = "tokens", "ner_tags"
+
+    for id, item in enumerate(dataset):
+        # filter invalid items
+        if not config.filter(item):
+            continue
+
+        label = item[label_col]
+        # convert label to original text
+        if isinstance(dataset.features[label_col], ClassLabel):
+            if isinstance(label, list):
+                label = [dataset.features[label_col].int2str(l) for l in label]
+            else:
+                label = dataset.features[label_col].int2str(label)
+
+        # process label
+        if isinstance(label, list):
+            label = [config.process_label(l) for l in label]
+        else:
+            label = config.process_label(label)
+
+        # filter out invalid classes
+        if config.task_type != "ner":
+            if isinstance(label, list):
+                invalid = False
+                for i in range(len(label)):
+                    if label[i] not in config.label_classes:
+                        invalid = True
+                        break
+                if invalid:
+                    continue
+            else:
+                if label not in config.label_classes:
+                    continue
+
+        yield id, {
+            "idx": id,
+            target_feature_col: item[feature_col],
+            target_label_col: label,
+        }
 
 class PTBenchmark(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
@@ -541,16 +689,19 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
             **CONFIG_KWARGS
         ) \
         for CONFIG_KWARGS in \
-        [_LENERBR_KWARGS, _ASSIN2_RTE_KWARGS, _ASSIN2_STS_KWARGS, _HATEBR_KWARGS, \
-        _ULYSSESNER_PL_KWARGS, _ULYSSESNER_C_KWARGS, _ULYSSESNER_PL_TIPOS_KWARGS, \
-        _ULYSSESNER_C_TIPOS_KWARGS, _BRAZILIAN_COURT_DECISIONS_JUDGMENT, _BRAZILIAN_COURT_DECISIONS_UNANIMITY, \
-        HAREM_DEFAULT_KWARGS, HAREM_SELECTIVE_KWARGS]
+        [_LENERBR_KWARGS, _ASSIN2_RTE_KWARGS, _ASSIN2_STS_KWARGS, _HATEBR_KWARGS,
+         _ULYSSESNER_PL_KWARGS, _ULYSSESNER_C_KWARGS, _ULYSSESNER_PL_TIPOS_KWARGS,
+         _ULYSSESNER_C_TIPOS_KWARGS, _BRAZILIAN_COURT_DECISIONS_JUDGMENT,
+         _BRAZILIAN_COURT_DECISIONS_UNANIMITY, HAREM_DEFAULT_KWARGS, HAREM_SELECTIVE_KWARGS,
+         _MULTIEURLEX_BASE_KWARGS, _MAPA_COARSE_KWARGS, _MAPA_FINE_KWARGS]
     ]
 
     def _info(self) -> datasets.DatasetInfo:
         features = None
         if self.config.task_type == "classification":
            features = _get_classification_features(self.config)
+        elif self.config.task_type == "multilabel_classification":
+            features = _get_multilabel_classification_features(self.config)
        elif self.config.task_type == "ner":
            features = _get_ner_features(self.config)
        elif self.config.task_type == "rte":
@@ -619,8 +770,8 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
     ):
         logger.info("⏳ Generating examples from = %s", file_path)
         if self.config.file_type == "hf_dataset":
-            dataset = datasets.load_dataset(self.config.data_urls, self.config.hf_config_name, split=split)
-            text_col, label_col = self.config.text_and_label_columns
+            yield from _hf_dataset_generator(split, self.config)
+            return
 
         if self.config.task_type == "classification":
             if self.config.file_type == "csv":
@@ -630,32 +781,10 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
                     indexes_path=indexes_path,
                     split=split
                 )
-            elif self.config.file_type == "hf_dataset":
-                for id, item in enumerate(dataset):
-                    label = item[label_col]
-                    if label not in self.config.label_classes:
-                        continue # filter out invalid classes to construct ClassLabel
-                    if isinstance(dataset.features[label_col], ClassLabel):
-                        label = dataset.features[label_col].int2str(label)
-                    yield id, {
-                        "idx": id,
-                        "sentence": item[text_col],
-                        "label": self.config.process_label(label),
-                    }
+        elif self.config.task_type == "multilabel_classification":
+            pass
         elif self.config.task_type == "ner":
-            if self.config.file_type == "hf_dataset":
-                for id, item in enumerate(dataset):
-                    tags = item[label_col]
-                    if isinstance(dataset.features[label_col], ClassLabel):
-                        for i in range(len(tags)):
-                            tags[i] = self.config.process_label(dataset.features[label_col].int2str(tags[i]))
-                    yield id, {
-                        "idx": id,
-                        "tokens": item[text_col],
-                        "ner_tags": tags,
-                    }
-            else:
-                yield from _conll_ner_generator(file_path, self.config)
+            yield from _conll_ner_generator(file_path, self.config)
         elif self.config.task_type == "rte":
             if "assin2" in self.config.name:
                 yield from _assin2_generator(file_path, self.config)
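
For quick verification of the two new subsets, a minimal usage sketch follows. It is illustrative only: the dataset repo id ("eduagarcia/portuguese_benchmark") and the split names are assumptions inferred from the author and file name, not stated anywhere in this diff; the config names, column names, and the PT filter come from the code above.

    # Sketch 1: loading the new configs through this script, assuming it is
    # hosted as the dataset repo "eduagarcia/portuguese_benchmark" (assumed).
    import datasets

    # Portuguese MAPA subset with coarse-grained NER labels (config added here)
    mapa = datasets.load_dataset("eduagarcia/portuguese_benchmark", "mapa_pt_coarse", split="train")
    print(mapa.features["ner_tags"])   # BIO tag set built from label_classes

    # Portuguese MultiEURLEX subset, multi-label over the 21 level-1 EUROVOC codes
    eurlex = datasets.load_dataset("eduagarcia/portuguese_benchmark", "multi_eurlex_pt", split="train")
    print(eurlex.features["labels"])

    # Sketch 2: the same PT filtering that _hf_dataset_generator applies for
    # MAPA, reproduced on the upstream dataset (default config and split assumed).
    mapa_all = datasets.load_dataset("joelito/mapa", split="train")
    mapa_pt = mapa_all.filter(lambda item: item["language"] == "pt")
    print(len(mapa_pt), mapa_pt[0]["coarse_grained"][:10])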