eduagarcia committed on
Commit
afa14ae
·
1 Parent(s): 88bda97

fix: HateBR labels

Browse files
Files changed (1) hide show
  1. portuguese_benchmark.py +29 -17
portuguese_benchmark.py CHANGED
@@ -1,5 +1,5 @@
1
  import datasets
2
- from typing import Dict, List, Optional, Union
3
  import json
4
  import textwrap
5
 
@@ -113,7 +113,7 @@ _HATEBR_KWARGS = dict(
113
  ),
114
  task_type="classification",
115
  file_type="csv",
116
- label_classes=[0, 1, 2, 3],
117
  data_urls={
118
  "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv"
119
  },
@@ -131,6 +131,13 @@ _HATEBR_KWARGS = dict(
131
  text_and_label_columns=["instagram_comments", "offensiveness_levels"],
132
  indexes_url="https://huggingface.co/datasets/ruanchaves/hatebr/raw/main/indexes.json"
133
  )
 
 
 
 
 
 
 
134
 
135
  class PTBenchmarkConfig(datasets.BuilderConfig):
136
  """BuilderConfig for PTBenchmark."""
@@ -144,7 +151,8 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
144
  label_classes: Optional[List[Union[str, int]]] = None,
145
  file_type: Optional[str] = None, #filetype (csv, tsc, jsonl)
146
  text_and_label_columns: Optional[List[str]] = None, #columns for train, dev and test for csv datasets
147
- indexes_url=None, #indexes for train, dev and test for single file datasets
 
148
  **kwargs,
149
  ):
150
  """BuilderConfig for GLUE.
@@ -174,6 +182,7 @@ class PTBenchmarkConfig(datasets.BuilderConfig):
174
  self.file_type = file_type
175
  self.text_and_label_columns = text_and_label_columns
176
  self.indexes_url = indexes_url
 
177
 
178
  def _get_classification_features(config):
179
  return datasets.Features(
@@ -220,11 +229,12 @@ def _get_sts_features(config):
220
  )
221
 
222
  def _csv_generator(file_path: str,
223
- columns: List[str],
224
  indexes_path: Optional[str] = None,
225
- split: Optional[str] = None):
 
226
  """Yields examples."""
227
  df = pd.read_csv(file_path)
 
228
  df = df[columns]
229
 
230
  with open(indexes_path, "r") as f:
@@ -236,11 +246,11 @@ def _csv_generator(file_path: str,
236
  example = {
237
  "idx": id_,
238
  "sentence": str(row[columns[0]]),
239
- "label": int(row[columns[-1]])
240
  }
241
  yield id_, example
242
 
243
- def _conll_ner_generator(file_path):
244
  with open(file_path, encoding="utf-8") as f:
245
 
246
  guid = 0
@@ -261,7 +271,7 @@ def _conll_ner_generator(file_path):
261
  else:
262
  splits = line.split(" ")
263
  tokens.append(splits[0])
264
- ner_tags.append(splits[1].rstrip())
265
 
266
  # last example
267
  yield guid, {
@@ -270,7 +280,7 @@ def _conll_ner_generator(file_path):
270
  "ner_tags": ner_tags,
271
  }
272
 
273
- def _assin2_generator(file_path, type):
274
  """Yields examples."""
275
  id_ = 0
276
 
@@ -279,6 +289,8 @@ def _assin2_generator(file_path, type):
279
  tree = ET.parse(f)
280
  root = tree.getroot()
281
 
 
 
282
  for pair in root:
283
 
284
  example = {
@@ -286,10 +298,10 @@ def _assin2_generator(file_path, type):
286
  "sentence1": pair.find(".//t").text,
287
  "sentence2": pair.find(".//h").text
288
  }
289
- if type == "rte":
290
  example["label"] = pair.attrib.get("entailment").upper()
291
- elif type == "sts":
292
- example["label"] = float(pair.attrib.get("similarity"))
293
 
294
  yield id_, example
295
  id_ += 1
@@ -378,16 +390,16 @@ class PTBenchmark(datasets.GeneratorBasedBuilder):
378
  if self.config.file_type == "csv":
379
  yield from _csv_generator(
380
  file_path,
381
- self.config.text_and_label_columns,
382
  indexes_path=indexes_path,
383
- split=split
 
384
  )
385
  elif self.config.task_type == "ner":
386
- yield from _conll_ner_generator(file_path)
387
  elif self.config.task_type == "rte":
388
  if "assin2" in self.config.name:
389
- yield from _assin2_generator(file_path, "rte")
390
  elif self.config.task_type == "sts":
391
  if "assin2" in self.config.name:
392
- yield from _assin2_generator(file_path, "sts")
393
 
 
1
  import datasets
2
+ from typing import Dict, List, Optional, Union, Callable
3
  import json
4
  import textwrap
5
 
 
113
  ),
114
  task_type="classification",
115
  file_type="csv",
116
+ label_classes=["non-offensive", "slightly", "moderately", "highly"],
117
  data_urls={
118
  "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv"
119
  },
 
131
  text_and_label_columns=["instagram_comments", "offensiveness_levels"],
132
  indexes_url="https://huggingface.co/datasets/ruanchaves/hatebr/raw/main/indexes.json"
133
  )
134
+ hatebr_map = {
135
+ "0": "non-offensive",
136
+ "1": "slightly",
137
+ "2": "moderately",
138
+ "3": "highly",
139
+ }
140
+ _HATEBR_KWARGS['process_label'] = lambda x: hatebr_map[x]
141
 
142
  class PTBenchmarkConfig(datasets.BuilderConfig):
143
  """BuilderConfig for PTBenchmark."""
 
151
  label_classes: Optional[List[Union[str, int]]] = None,
152
  file_type: Optional[str] = None, #filetype (csv, tsc, jsonl)
153
  text_and_label_columns: Optional[List[str]] = None, #columns for train, dev and test for csv datasets
154
+ indexes_url: Optional[str] = None, #indexes for train, dev and test for single file datasets
155
+ process_label: Optional[Callable[[str], str]] = lambda x: x,
156
  **kwargs,
157
  ):
158
  """BuilderConfig for GLUE.
 
182
  self.file_type = file_type
183
  self.text_and_label_columns = text_and_label_columns
184
  self.indexes_url = indexes_url
185
+ self.process_label = process_label
186
 
187
  def _get_classification_features(config):
188
  return datasets.Features(
 
229
  )
230
 
231
  def _csv_generator(file_path: str,
 
232
  indexes_path: Optional[str] = None,
233
+ split: Optional[str] = None,
234
+ config: PTBenchmarkConfig = None):
235
  """Yields examples."""
236
  df = pd.read_csv(file_path)
237
+ columns = config.text_and_label_columns
238
  df = df[columns]
239
 
240
  with open(indexes_path, "r") as f:
 
246
  example = {
247
  "idx": id_,
248
  "sentence": str(row[columns[0]]),
249
+ "label": config.process_label(str(row[columns[-1]]))
250
  }
251
  yield id_, example
252
 
253
+ def _conll_ner_generator(file_path: str, config: PTBenchmarkConfig = None):
254
  with open(file_path, encoding="utf-8") as f:
255
 
256
  guid = 0
 
271
  else:
272
  splits = line.split(" ")
273
  tokens.append(splits[0])
274
+ ner_tags.append(config.process_label(splits[1].rstrip()))
275
 
276
  # last example
277
  yield guid, {
 
280
  "ner_tags": ner_tags,
281
  }
282
 
283
+ def _assin2_generator(file_path, config: PTBenchmarkConfig = None):
284
  """Yields examples."""
285
  id_ = 0
286
 
 
289
  tree = ET.parse(f)
290
  root = tree.getroot()
291
 
292
+ task_type = config.task_type
293
+
294
  for pair in root:
295
 
296
  example = {
 
298
  "sentence1": pair.find(".//t").text,
299
  "sentence2": pair.find(".//h").text
300
  }
301
+ if task_type == "rte":
302
  example["label"] = pair.attrib.get("entailment").upper()
303
+ elif task_type == "sts":
304
+ example["label"] = float(config.process_label(pair.attrib.get("similarity")))
305
 
306
  yield id_, example
307
  id_ += 1
 
390
  if self.config.file_type == "csv":
391
  yield from _csv_generator(
392
  file_path,
 
393
  indexes_path=indexes_path,
394
+ split=split,
395
+ config=self.config
396
  )
397
  elif self.config.task_type == "ner":
398
+ yield from _conll_ner_generator(file_path, config=self.config)
399
  elif self.config.task_type == "rte":
400
  if "assin2" in self.config.name:
401
+ yield from _assin2_generator(file_path, config=self.config)
402
  elif self.config.task_type == "sts":
403
  if "assin2" in self.config.name:
404
+ yield from _assin2_generator(file_path, config=self.config)
405