antypasd committed
Commit d84685a · 1 Parent(s): 1939eed

added disambiguation val set

data/tweet_disambiguation/validation.jsonl ADDED
The diff for this file is too large to render.
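
The split itself is not rendered inline, but its expected schema can be read off the tweet_disambiguation features declared in super_tweet_eval.py below. A minimal sanity-check sketch for a local copy of the file; the key set is an assumption taken from that feature list (including its "car_idx_end" spelling), not something shown in this diff:

import json

# Assumed record keys, copied from the feature mapping added below for the
# "tweet_disambiguation" config.
EXPECTED_KEYS = {"gold_label_binary", "target", "context", "definition",
                 "char_idx_start", "car_idx_end"}

with open("data/tweet_disambiguation/validation.jsonl", encoding="utf-8") as f:
    record = json.loads(f.readline())

missing = EXPECTED_KEYS - set(record)
print("missing keys:", missing or "none")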
 
super_tweet_eval.py CHANGED
@@ -122,7 +122,8 @@ _TWEET_HATE_CITATION = """\
122
  abstract = "We introduce the Measuring Hate Speech corpus, a dataset created to measure hate speech while adjusting for annotators{'} perspectives. It consists of 50,070 social media comments spanning YouTube, Reddit, and Twitter, labeled by 11,143 annotators recruited from Amazon Mechanical Turk. Each observation includes 10 ordinal labels: sentiment, disrespect, insult, attacking/defending, humiliation, inferior/superior status, dehumanization, violence, genocide, and a 3-valued hate speech benchmark label. The labels are aggregated using faceted Rasch measurement theory (RMT) into a continuous score that measures each comment{'}s location on a hate speech spectrum. The annotation experimental design assigned comments to multiple annotators in order to yield a linked network, allowing annotator disagreement (perspective) to be statistically summarized. Annotators{'} labeling strictness was estimated during the RMT scaling, projecting their perspective onto a linear measure that was adjusted for the hate speech score. Models that incorporate this annotator perspective parameter as an auxiliary input can generate label- and score-level predictions conditional on annotator perspective. The corpus includes the identity group targets of each comment (8 groups, 42 subgroups) and annotator demographics (6 groups, 40 subgroups), facilitating analyses of interactions between annotator- and comment-level identities, i.e. identity-related annotator perspective.",
123
  }
124
  """
125
-
 
126
 
127
 
128
  class SuperTweetEvalConfig(datasets.BuilderConfig):
@@ -204,6 +205,13 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
204
  features=['gold_label', 'text'],
205
  data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_hate",
206
  )
 
 
 
 
 
 
 
207
  ]
208
 
209
  def _info(self):
@@ -236,6 +244,13 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
236
  'not_hate']
237
  features["gold_label"] = datasets.Value("int32")
238
  features["text"] = datasets.Value("string")
 
 
 
 
 
 
 
239
 
240
  return datasets.DatasetInfo(
241
  description=_SUPER_TWEET_EVAL_DESCRIPTION + "\n" + self.config.description,
 
122
  abstract = "We introduce the Measuring Hate Speech corpus, a dataset created to measure hate speech while adjusting for annotators{'} perspectives. It consists of 50,070 social media comments spanning YouTube, Reddit, and Twitter, labeled by 11,143 annotators recruited from Amazon Mechanical Turk. Each observation includes 10 ordinal labels: sentiment, disrespect, insult, attacking/defending, humiliation, inferior/superior status, dehumanization, violence, genocide, and a 3-valued hate speech benchmark label. The labels are aggregated using faceted Rasch measurement theory (RMT) into a continuous score that measures each comment{'}s location on a hate speech spectrum. The annotation experimental design assigned comments to multiple annotators in order to yield a linked network, allowing annotator disagreement (perspective) to be statistically summarized. Annotators{'} labeling strictness was estimated during the RMT scaling, projecting their perspective onto a linear measure that was adjusted for the hate speech score. Models that incorporate this annotator perspective parameter as an auxiliary input can generate label- and score-level predictions conditional on annotator perspective. The corpus includes the identity group targets of each comment (8 groups, 42 subgroups) and annotator demographics (6 groups, 40 subgroups), facilitating analyses of interactions between annotator- and comment-level identities, i.e. identity-related annotator perspective.",
123
  }
124
  """
125
+ _TWEET_DISAMBIGUATION_DESCRIPTION = """TBA"""
126
+ _TWEET_DISAMBIGUATION_CITATION = """TBA"""
127
 
128
 
129
  class SuperTweetEvalConfig(datasets.BuilderConfig):
 
205
  features=['gold_label', 'text'],
206
  data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_hate",
207
  )
208
+ SuperTweetEvalConfig(
209
+ name="tweet_disambiguation",
210
+ description=_TWEET_DISAMBIGUATION_DESCRIPTION,
211
+ citation=_TWEET_DISAMBIGUATION_CITATION,
212
+ features=['gold_label_binary', 'target', 'context', 'definition', 'char_idx_start', 'car_idx_end']
213
+ data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_disambiguation",
214
+ )
215
  ]
216
 
217
  def _info(self):
 
244
  'not_hate']
245
  features["gold_label"] = datasets.Value("int32")
246
  features["text"] = datasets.Value("string")
247
+ if self.config.name == "tweet_disambiguation":
248
+ features['target'] = datasets.Value("string")
249
+ features['context'] = datasets.Value("string")
250
+ features['definition'] = datasets.Value("string")
251
+ features['char_idx_start'] = datasets.Value("int32")
252
+ features['car_idx_end'] = datasets.Value("int32")
253
+ features['gold_label_binary'] = datasets.Value("int32")
254
 
255
  return datasets.DatasetInfo(
256
  description=_SUPER_TWEET_EVAL_DESCRIPTION + "\n" + self.config.description,
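
Once this script change and the new split are on the Hub, the subset should load like any other SuperTweetEval config. A minimal sketch, assuming the cardiffnlp/super_tweet_eval repo exposes the tweet_disambiguation config exactly as added above:

from datasets import load_dataset

# Load only the validation split added in this commit; the config name matches
# the new SuperTweetEvalConfig entry ("tweet_disambiguation").
ds = load_dataset("cardiffnlp/super_tweet_eval", "tweet_disambiguation", split="validation")

print(ds.features)  # string fields plus int32 index/label fields, per _info() above

example = ds[0]
# Assumed semantics: char_idx_start / car_idx_end delimit the target mention in "context".
print(example["target"], "->",
      example["context"][example["char_idx_start"]:example["car_idx_end"]])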