Support streaming

#2
by albertvillanova (HF staff) - opened
Files changed (2)
  1. data.zip +3 -0
  2. hate_speech18.py +10 -24
data.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:405c9bf2a8fb08ba7d26d34e7aa32d53830da0eec1e6a9c82b2aa7f19b11104a
+size 3185028
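
The archive is committed via Git LFS, so only the pointer file appears in the diff. As a hypothetical local sanity check of the layout the loading script expects (the data/annotations_metadata.csv and data/all_files/*.txt paths are inferred from hate_speech18.py below, not stated anywhere in the PR):

import zipfile

# Hypothetical check; paths inferred from the loading script, not from the PR itself.
with zipfile.ZipFile("data.zip") as zf:
    names = zf.namelist()
    assert "data/annotations_metadata.csv" in names
    assert any(n.startswith("data/all_files/") and n.endswith(".txt") for n in names)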
hate_speech18.py CHANGED
@@ -47,7 +47,7 @@ forums posts have been sampled from several subforums and split into sentences.
 have been manually labelled as containing hate speech or not, according to certain annotation guidelines.
 """
 
-_DATA_URL = "https://github.com/Vicomtech/hate-speech-dataset/archive/master.zip"
+_DATA_URL = "data.zip"
 
 
 class HateSpeech18(datasets.GeneratorBasedBuilder):
@@ -75,35 +75,21 @@ class HateSpeech18(datasets.GeneratorBasedBuilder):
 
         return [
             datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_dir, "hate-speech-dataset-master")}
+                name=datasets.Split.TRAIN, gen_kwargs={"data_dir": os.path.join(dl_dir, "data")}
             ),
         ]
 
-    def _generate_examples(self, filepath):
-
-        with open(os.path.join(filepath, "annotations_metadata.csv"), encoding="utf-8") as csv_file:
-
-            csv_reader = csv.reader(
+    def _generate_examples(self, data_dir):
+        all_files_path = os.path.join(data_dir, "all_files")
+        with open(os.path.join(data_dir, "annotations_metadata.csv"), encoding="utf-8") as csv_file:
+            csv_reader = csv.DictReader(
                 csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
             )
-
-            next(csv_reader)
-
             for idx, row in enumerate(csv_reader):
-
-                file_id, user_id, subforum_id, num_contexts, label = row
-
-                all_files_path = os.path.join(filepath, "all_files")
-
-                path = os.path.join(all_files_path, file_id + ".txt")
-
-                with open(path, encoding="utf-8") as file:
-                    text = file.read()
-
+                text_path = os.path.join(all_files_path, row.pop("file_id") + ".txt")
+                with open(text_path, encoding="utf-8") as text_file:
+                    text = text_file.read()
                 yield idx, {
                     "text": text,
-                    "user_id": user_id,
-                    "subforum_id": subforum_id,
-                    "num_contexts": num_contexts,
-                    "label": label,
+                    **row,
                 }
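
With the archive hosted in the repo and the script sticking to plain open/os.path.join (which the datasets library patches in streaming mode to read from inside remote archives), the dataset can be iterated lazily. A minimal usage sketch, assuming the dataset is published under the hate_speech18 repo id:

from datasets import load_dataset

# Stream examples without downloading the whole archive first.
ds = load_dataset("hate_speech18", split="train", streaming=True)
print(next(iter(ds)))  # {'text': ..., 'user_id': ..., 'subforum_id': ..., 'num_contexts': ..., 'label': ...}

Note that csv.DictReader consumes the header row itself, which is why the explicit next(csv_reader) call is dropped, and **row spreads the remaining columns (user_id, subforum_id, num_contexts, label) into the example after file_id is popped to build the text path.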