Datasets:
Tasks: Text Classification
Modalities: Text
Sub-tasks: hate-speech-detection
Languages: Portuguese
Size: 1K - 10K
Tags: instagram

ruanchaves committed
Commit 22d3e5b · Parent(s): 835dc8b
add specialists
hatebr.py
CHANGED
@@ -17,7 +17,8 @@ _DESCRIPTION = """
 HateBR is the first large-scale expert annotated corpus of Brazilian Instagram comments for hate speech and offensive language detection on the web and social media. The HateBR corpus was collected from Brazilian Instagram comments of politicians and manually annotated by specialists. It is composed of 7,000 documents annotated according to three different layers: a binary classification (offensive versus non-offensive comments), offensiveness-level (highly, moderately, and slightly offensive messages), and nine hate speech groups (xenophobia, racism, homophobia, sexism, religious intolerance, partyism, apology for the dictatorship, antisemitism, and fatphobia). Each comment was annotated by three different annotators and achieved high inter-annotator agreement. Furthermore, baseline experiments were implemented reaching 85% of F1-score outperforming the current literature models for the Portuguese language. Accordingly, we hope that the proposed expertly annotated corpus may foster research on hate speech and offensive language detection in the Natural Language Processing area.
 """
 _URLS = {
-    "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv"
+    "train": "https://raw.githubusercontent.com/franciellevargas/HateBR/2d18c5b9410c2dfdd6d5394caa54d608857dae7c/dataset/HateBR.csv",
+    "annotators": "https://raw.githubusercontent.com/franciellevargas/HateBR/9c083381ec005778deffd14c3454a3b638b734d7/annotators/concordancia_Kappa_Fleiss.csv"
 }
 
 _LABEL_INT_KEY = {
@@ -46,6 +47,9 @@ class Boun(datasets.GeneratorBasedBuilder):
             {
                 "instagram_comments": datasets.Value("string"),
                 "offensive_language": datasets.Value("bool"),
+                "specialist_1_offensive_language": datasets.Value("bool"),
+                "specialist_2_offensive_language": datasets.Value("bool"),
+                "specialist_3_offensive_language": datasets.Value("bool"),
                 "offensiveness_levels": datasets.Value("int32"),
                 "antisemitism": datasets.Value("bool"),
                 "apology_for_the_dictatorship": datasets.Value("bool"),
@@ -67,18 +71,29 @@
     def _split_generators(self, dl_manager):
         downloaded_files = dl_manager.download(_URLS)
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]})
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": downloaded_files["train"],
+                    "annotators": downloaded_files["annotators"]
+                }
+            )
         ]
 
-    def _generate_examples(self, filepath):
-        def process_row(row):
+    def _generate_examples(self, filepath, annotators):
+        def process_row(row, annotator_row):
             categories = row["hate_speech"].split(",")
             del row["hate_speech"]
             for default_label in _LABEL_INT_KEY.keys():
                 row[default_label] = False
             for int_label in categories:
                 row[_INT_LABEL_KEY[int(int_label)]] = True
+
+            row["specialist_1_offensive_language"] = bool(int(annotator_row["Avaliador 1"]))
+            row["specialist_2_offensive_language"] = bool(int(annotator_row["Avaliador 2"]))
+            row["specialist_3_offensive_language"] = bool(int(annotator_row["Avaliador 3"]))
             return row
         records = pd.read_csv(filepath).to_dict("records")
+        annotators = pd.read_csv(annotators).to_dict("records")
         for idx, row in enumerate(records):
-            yield idx, process_row(row)
+            yield idx, process_row(row, annotators[idx])
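
For readers skimming the diff: process_row expands the comma-separated hate_speech column into one boolean column per hate speech category, and after this commit it also attaches the three specialists' individual votes. A self-contained sketch of the expansion step, using a two-entry stand-in for the real _LABEL_INT_KEY table defined earlier in hatebr.py (the integer codes here are illustrative, not the dataset's actual codes):

# Stand-in tables; hatebr.py defines the full nine-category mapping.
_LABEL_INT_KEY = {"racism": 1, "sexism": 2}
_INT_LABEL_KEY = {value: key for key, value in _LABEL_INT_KEY.items()}

row = {"instagram_comments": "some comment", "hate_speech": "1,2"}
categories = row["hate_speech"].split(",")   # e.g. ["1", "2"]
del row["hate_speech"]
for default_label in _LABEL_INT_KEY.keys():  # every category defaults to False
    row[default_label] = False
for int_label in categories:                 # flip the listed categories to True
    row[_INT_LABEL_KEY[int(int_label)]] = True
# row is now {"instagram_comments": "some comment", "racism": True, "sexism": True}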
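With the specialist columns in place, each example carries the aggregate binary label plus the three individual votes, which makes agreement statistics reproducible from the loaded dataset alone. A minimal usage sketch, assuming the script is published under the committer's namespace as ruanchaves/hatebr (the repo id is an assumption, and script-based datasets may need trust_remote_code=True on recent versions of the datasets library):

from datasets import load_dataset

# Assumed repo id; adjust to wherever this hatebr.py script is hosted.
dataset = load_dataset("ruanchaves/hatebr", split="train", trust_remote_code=True)

example = dataset[0]
print(example["offensive_language"])  # aggregate binary label
print(example["specialist_1_offensive_language"],
      example["specialist_2_offensive_language"],
      example["specialist_3_offensive_language"])

# Share of comments where all three specialists agree: a quick sanity
# check against the high inter-annotator agreement reported above.
unanimous = sum(
    e["specialist_1_offensive_language"]
    == e["specialist_2_offensive_language"]
    == e["specialist_3_offensive_language"]
    for e in dataset
)
print(unanimous / len(dataset))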