eduagarcia committed
Commit 88bda97
1 Parent(s): 41b694a

Add HateBR task

Files changed (1)
  1. portuguese_benchmark.py +176 -74
portuguese_benchmark.py CHANGED
@@ -1,8 +1,10 @@
-import textwrap
 import datasets
 from typing import Dict, List, Optional, Union
+import json
+import textwrap
 
 import xml.etree.ElementTree as ET
+import pandas as pd
 
 logger = datasets.logging.get_logger(__name__)
 
@@ -25,7 +27,7 @@ _LENERBR_KWARGS = dict(
     label_classes=["ORGANIZACAO", "PESSOA", "TEMPO", "LOCAL", "LEGISLACAO", "JURISPRUDENCIA"],
     data_urls={
         "train": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/train/train.conll",
-        "dev": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/dev/dev.conll",
+        "validation": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/dev/dev.conll",
         "test": "https://raw.githubusercontent.com/peluz/lener-br/master/leNER-Br/test/test.conll",
     },
     citation=textwrap.dedent(
@@ -64,7 +66,7 @@ _ASSIN2_BASE_KWARGS = dict(
     ),
     data_urls={
         "train": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-train-only.xml",
-        "dev": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-dev.xml",
+        "validation": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-dev.xml",
         "test": "https://github.com/ruanchaves/assin/raw/master/sources/assin2-test.xml",
     },
     citation=textwrap.dedent(
@@ -92,18 +94,57 @@ _ASSIN2_STS_KWARGS = dict(
     **_ASSIN2_BASE_KWARGS
 )
 
+# Extracted from:
+# - https://huggingface.co/datasets/ruanchaves/hatebr
+# - https://github.com/franciellevargas/HateBR
+_HATEBR_KWARGS = dict(
+    name="HateBR",
+    description=textwrap.dedent(
+        """\
+        HateBR is the first large-scale, expert-annotated dataset of Brazilian Instagram comments for abusive language detection
+        on the web and social media. HateBR was collected from the Instagram comments of Brazilian politicians and manually annotated
+        by specialists. It is composed of 7,000 documents annotated according to three different layers: a binary classification (offensive
+        versus non-offensive comments), offensiveness level (highly, moderately, and slightly offensive messages), and nine hate speech
+        groups (xenophobia, racism, homophobia, sexism, religious intolerance, partyism, apology for the dictatorship, antisemitism,
+        and fatphobia). Each comment was annotated by three different annotators and achieved high inter-annotator agreement. Furthermore,
+        baseline experiments reached an F1-score of 85%, outperforming the current literature baselines for the Portuguese language.
+        We hope that this expert-annotated dataset may foster research on hate speech detection in the Natural Language Processing area."""
+    ),
+    task_type="classification",
+    file_type="csv",
+    label_classes=[0, 1, 2, 3],
+    data_urls={
+        "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv"
+    },
+    citation=textwrap.dedent(
+        """\
+        @inproceedings{vargas2022hatebr,
+            title={HateBR: A Large Expert Annotated Corpus of Brazilian Instagram Comments for Offensive Language and Hate Speech Detection},
+            author={Vargas, Francielle and Carvalho, Isabelle and de G{\'o}es, Fabiana Rodrigues and Pardo, Thiago and Benevenuto, Fabr{\'\i}cio},
+            booktitle={Proceedings of the Thirteenth Language Resources and Evaluation Conference},
+            pages={7174--7183},
+            year={2022}
+        }"""
+    ),
+    url="https://github.com/franciellevargas/HateBR",
+    text_and_label_columns=["instagram_comments", "offensiveness_levels"],
+    indexes_url="https://huggingface.co/datasets/ruanchaves/hatebr/raw/main/indexes.json"
+)
 
 class PTBenchmarkConfig(datasets.BuilderConfig):
     """BuilderConfig for PTBenchmark."""
 
     def __init__(
         self,
-        task_type,
-        data_urls,
-        citation,
-        url,
-        label_classes=None,
-        process_label=lambda x: x,
+        task_type: str,
+        data_urls: Dict[str, str],
+        citation: str,
+        url: str,
+        label_classes: Optional[List[Union[str, int]]] = None,
+        file_type: Optional[str] = None,  # file type (csv, tsv, jsonl)
+        text_and_label_columns: Optional[List[str]] = None,  # text and label columns for csv datasets
+        indexes_url=None,  # train/validation/test indexes for single-file datasets
         **kwargs,
     ):
         """BuilderConfig for GLUE.
@@ -130,57 +171,74 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
         self.data_urls = data_urls
         self.citation = citation
         self.url = url
-        self.process_label = process_label
+        self.file_type = file_type
+        self.text_and_label_columns = text_and_label_columns
+        self.indexes_url = indexes_url
+
+def _get_classification_features(config):
+    return datasets.Features(
+        {
+            "idx": datasets.Value("int32"),
+            "sentence": datasets.Value("string"),
+            "label": datasets.features.ClassLabel(names=config.label_classes),
+        }
+    )
 
-def _get_ner_dataset_info(config):
+def _get_ner_features(config):
     bio_labels = ["O"]
     for label_name in config.label_classes:
         bio_labels.append("B-" + label_name)
         bio_labels.append("I-" + label_name)
-    return datasets.DatasetInfo(
-        description=config.description,
-        homepage=config.url,
-        citation=config.citation,
-        features=datasets.Features(
-            {
-                "idx": datasets.Value("int32"),
-                "tokens": datasets.Sequence(datasets.Value("string")),
-                "ner_tags": datasets.Sequence(
-                    datasets.features.ClassLabel(names=bio_labels)
-                ),
-            }
-        )
-    )
+    return datasets.Features(
+        {
+            "idx": datasets.Value("int32"),
+            "tokens": datasets.Sequence(datasets.Value("string")),
+            "ner_tags": datasets.Sequence(
+                datasets.features.ClassLabel(names=bio_labels)
+            ),
+        }
+    )
 
-def _get_rte_dataset_info(config):
-    return datasets.DatasetInfo(
-        description=config.description,
-        homepage=config.url,
-        citation=config.citation,
-        features=datasets.Features(
-            {
-                "idx": datasets.Value("int32"),
-                "sentence1": datasets.Value("string"),
-                "sentence2": datasets.Value("string"),
-                "label": datasets.features.ClassLabel(names=config.label_classes),
-            }
-        )
-    )
+def _get_rte_features(config):
+    return datasets.Features(
+        {
+            "idx": datasets.Value("int32"),
+            "sentence1": datasets.Value("string"),
+            "sentence2": datasets.Value("string"),
+            "label": datasets.features.ClassLabel(names=config.label_classes),
+        }
+    )
 
-def _get_sts_dataset_info(config):
-    return datasets.DatasetInfo(
-        description=config.description,
-        homepage=config.url,
-        citation=config.citation,
-        features=datasets.Features(
-            {
-                "idx": datasets.Value("int32"),
-                "sentence1": datasets.Value("string"),
-                "sentence2": datasets.Value("string"),
-                "label": datasets.Value("float32"),
-            }
-        )
-    )
+def _get_sts_features(config):
+    return datasets.Features(
+        {
+            "idx": datasets.Value("int32"),
+            "sentence1": datasets.Value("string"),
+            "sentence2": datasets.Value("string"),
+            "label": datasets.Value("float32"),
+        }
+    )
+
+def _csv_generator(file_path: str,
+                   columns: List[str],
+                   indexes_path: Optional[str] = None,
+                   split: Optional[str] = None):
+    """Yields examples from a single CSV file, selecting rows by split indexes."""
+    df = pd.read_csv(file_path)
+    df = df[columns]
+
+    # Single-file datasets must provide indexes_path and split to select rows.
+    with open(indexes_path, "r") as f:
+        indexes = json.load(f)
+    split_indexes = indexes[split]
+    df_split = df.iloc[split_indexes]
+
+    for id_, row in df_split.iterrows():
+        example = {
+            "idx": id_,
+            "sentence": str(row[columns[0]]),
+            "label": int(row[columns[-1]])
+        }
+        yield id_, example
 
 def _conll_ner_generator(file_path):
     with open(file_path, encoding="utf-8") as f:
@@ -247,40 +305,84 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
         ),
         PTBenchmarkConfig(
             **_ASSIN2_STS_KWARGS
+        ),
+        PTBenchmarkConfig(
+            **_HATEBR_KWARGS
         )
     ]
 
     def _info(self) -> datasets.DatasetInfo:
-        if self.config.task_type == "ner":
-            return _get_ner_dataset_info(self.config)
+        features = None
+        if self.config.task_type == "classification":
+            features = _get_classification_features(self.config)
+        elif self.config.task_type == "ner":
+            features = _get_ner_features(self.config)
         elif self.config.task_type == "rte":
-            return _get_rte_dataset_info(self.config)
+            features = _get_rte_features(self.config)
         elif self.config.task_type == "sts":
-            return _get_sts_dataset_info(self.config)
+            features = _get_sts_features(self.config)
+
+        return datasets.DatasetInfo(
+            description=self.config.description,
+            homepage=self.config.url,
+            citation=self.config.citation,
+            supervised_keys=None,
+            features=features
+        )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
-        file_paths = dl_manager.download_and_extract(self.config.data_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"file_path": file_paths["train"]},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={"file_path": file_paths["dev"]},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"file_path": file_paths["test"]},
-            )
-        ]
+        data_urls = self.config.data_urls.copy()
+        if self.config.indexes_url is not None:
+            data_urls["indexes"] = self.config.indexes_url
+        file_paths = dl_manager.download_and_extract(data_urls)
+
+        if self.config.indexes_url is None:
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"file_path": file_paths["train"]},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"file_path": file_paths["validation"]},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"file_path": file_paths["test"]},
+                )
+            ]
+        else:
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"file_path": file_paths["train"], "indexes_path": file_paths["indexes"], "split": "train"},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={"file_path": file_paths["train"], "indexes_path": file_paths["indexes"], "split": "validation"},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"file_path": file_paths["train"], "indexes_path": file_paths["indexes"], "split": "test"},
+                )
+            ]
 
     def _generate_examples(
         self,
-        file_path: Optional[str] = None
+        file_path: Optional[str] = None,
+        indexes_path: Optional[str] = None,
+        split: Optional[str] = None
    ):
         logger.info("⏳ Generating examples from = %s", file_path)
-        if self.config.task_type == "ner":
+        if self.config.task_type == "classification":
+            if self.config.file_type == "csv":
+                yield from _csv_generator(
+                    file_path,
+                    self.config.text_and_label_columns,
+                    indexes_path=indexes_path,
+                    split=split
+                )
+        elif self.config.task_type == "ner":
             yield from _conll_ner_generator(file_path)
         elif self.config.task_type == "rte":
             if "assin2" in self.config.name:
 
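Nothing in the new code is HateBR-specific beyond its kwargs, so a second single-file CSV classification task could presumably be registered the same way. A hypothetical sketch (every name and URL is a placeholder, not a real dataset):

```python
import textwrap

# Hypothetical: another single-file CSV task following the _HATEBR_KWARGS
# pattern. All names and URLs here are invented placeholders.
_MY_CSV_TASK_KWARGS = dict(
    name="MyCsvTask",
    description=textwrap.dedent(
        """\
        Placeholder description of a single-file CSV classification task."""
    ),
    task_type="classification",
    file_type="csv",
    label_classes=[0, 1],
    data_urls={"train": "https://example.com/my_task.csv"},
    citation="",
    url="https://example.com/my_task",
    text_and_label_columns=["text", "label"],
    indexes_url="https://example.com/my_task_indexes.json",
)
```

Appending `PTBenchmarkConfig(**_MY_CSV_TASK_KWARGS)` to `BUILDER_CONFIGS` would then expose it as one more config.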
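For reference, the BIO tag set that `_get_ner_features` derives for the LeNER-Br classes, reproduced standalone:

```python
# "O" plus a B-/I- pair per class, in declaration order (13 labels total).
label_classes = ["ORGANIZACAO", "PESSOA", "TEMPO", "LOCAL", "LEGISLACAO", "JURISPRUDENCIA"]

bio_labels = ["O"]
for label_name in label_classes:
    bio_labels.append("B-" + label_name)
    bio_labels.append("I-" + label_name)

print(len(bio_labels))  # 13
print(bio_labels[:3])   # ['O', 'B-ORGANIZACAO', 'I-ORGANIZACAO']
```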
 