EricR401S committed
Commit
a21e949
1 Parent(s): 2cb9bff

check, title change

reddit_dataset_loader.py → Pill-Ideologies-New-Test.py RENAMED
@@ -20,6 +20,7 @@ reddit posts dataset."""
 import csv
 import json
 import os
+import pandas as pd

 import datasets

@@ -217,6 +218,40 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
                     "date": data["date"],
                 }

+    # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    # def _generate_examples(self, filepath, split):
+    #     # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+    #     # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+    #     print(filepath, "is the filepath")
+    #     with open(filepath, encoding="utf-8") as f:
+    #         print(f, "is the file")
+    #         for key, row in enumerate(f):
+    #             data = json.loads(row)
+    #             print(data, "is the data")
+    #             if self.config.name == "first_domain":
+    #                 # Yields examples as (key, example) tuples
+    #                 yield key, {
+    #                     "subreddit": data["subreddit"],
+    #                     "id": data["id"],
+    #                     "title": data["title"],
+    #                     "text": data["text"],
+    #                     "url": data["url"],
+    #                     "score": data["score"],
+    #                     "author": data["author"],
+    #                     "date": data["date"],
+    #                 }
+    #             else:
+    #                 yield key, {
+    #                     "subreddit": data["subreddit"],
+    #                     "id": data["id"],
+    #                     "title": data["title"],
+    #                     "text": data["text"],
+    #                     "url": data["url"],
+    #                     "score": data["score"],
+    #                     "author": data["author"],
+    #                     "date": data["date"],
+    #                 }
+
     # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     # def _generate_examples(self, filepath, split):
     #     # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
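
Note that this commit imports `pandas` without using it yet: the active `_generate_examples` still reads JSONL line by line, while the repository's data lives in `reddit_posts_fm.csv`. A minimal sketch of a pandas-based, CSV-backed generator matching the features declared in `_info()` (an assumption about where the script is headed, not part of the commit):

    # Hypothetical CSV-backed generator using the newly imported pandas.
    # Assumes reddit_posts_fm.csv carries the columns declared in _info().
    def _generate_examples(self, filepath, split):
        df = pd.read_csv(filepath)
        for key, row in df.iterrows():
            yield key, {
                "subreddit": row["subreddit"],
                "id": row["id"],
                "title": row["title"],
                "text": row["text"],
                "url": row["url"],
                "score": int(row["score"]),
                "author": row["author"],
                "date": row["date"],
            }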
make_splits.py ADDED
File without changes
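
`make_splits.py` is committed empty. Since `_split_generators` in the loader below expects `train.jsonl`, `dev.jsonl`, and `test.jsonl`, a plausible sketch of what this helper might eventually do (hypothetical; filenames, split ratios, and seed are all assumptions):

    # Hypothetical split-maker: turns the scraped CSV into the JSONL splits
    # that _split_generators expects. Ratios and random seed are assumptions.
    import pandas as pd

    df = pd.read_csv("reddit_posts_fm.csv")
    df = df.sample(frac=1.0, random_state=42)  # shuffle reproducibly

    n = len(df)
    splits = {
        "train.jsonl": df.iloc[: int(0.8 * n)],
        "dev.jsonl": df.iloc[int(0.8 * n) : int(0.9 * n)],
        "test.jsonl": df.iloc[int(0.9 * n) :],
    }
    for name, part in splits.items():
        # one JSON object per line, matching the json.loads(row) reader
        part.to_json(name, orient="records", lines=True)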
reddit_dataset_load.py ADDED
@@ -0,0 +1,262 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# TODO: Address all TODOs and remove all explanatory comments
+
+"""This script's purpose is to re-define the dataset loading functions to better suit this specific
+reddit posts dataset."""
+
+
+import csv
+import json
+import os
+
+import datasets
+
+
+# TODO: Add BibTeX citation
+# Find for instance the citation on arxiv or on the dataset repo/website
+_CITATION = """\
+@InProceedings{huggingface:dataset,
+title = {Pill Ideologies Subreddits Dataset},
+author={Eric Rios},
+year={2024}
+source = {reddit.com}
+}
+
+"""
+
+# TODO: Add description of the dataset here
+# You can copy an official description
+_DESCRIPTION = """\
+This new dataset is designed to aid research in the ongoing study of the pill ideologies subreddits,
+which have risen in response to the clashes between traditional gender roles and the rise of fourth wave feminism.
+"""
+
+# TODO: Add a link to an official homepage for the dataset here
+_HOMEPAGE = "https://huggingface.co/datasets/steamcyclone/Pill-Ideologies-New-Test"
+
+# TODO: Add the licence for the dataset here if you can find it
+_LICENSE = "Creative Commons"  # cc
+
+# TODO: Add link to the official dataset URLs here
+# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URLS = {
+    "train": "https://huggingface.co/datasets/steamcyclone/Pill-Ideologies-New-Test/blob/main/reddit_posts_fm.csv",
+    "dev": "https://huggingface.co/datasets/steamcyclone/Pill-Ideologies-New-Test/blob/main/reddit_posts_fm.csv",
+}
+
+
+# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+class SubRedditPosts(datasets.GeneratorBasedBuilder):
+    """This dataset contains data from the pill ideologies subreddits and the feminism subreddit.
+
+    It has the subreddit, post_id, title, text, url, score, author, and date.
+
+    It was fully scraped on February 3rd, 2024."""
+
+    VERSION = datasets.Version("1.1.0")
+
+    # This is an example of a dataset with multiple configurations.
+    # If you don't want/need to define several sub-sets in your dataset,
+    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+    # If you need to make complex sub-parts in the datasets with configurable options
+    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
+    # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+    # You will be able to load one or the other configurations in the following list with
+    # data = datasets.load_dataset('my_dataset', 'first_domain')
+    # data = datasets.load_dataset('my_dataset', 'second_domain')
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="train",
+            version=VERSION,
+            description="This part of my dataset covers a first domain",
+        ),
+        datasets.BuilderConfig(
+            name="dev",
+            version=VERSION,
+            description="This part of my dataset covers a second domain",
+        ),
+    ]
+
+    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+    def show_me():
+
+        return _DESCRIPTION
+
+    def _info(self):
+        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+        # This is the name of the configuration selected in BUILDER_CONFIGS above
+        features = datasets.Features(
+            {
+                "subreddit": datasets.Value("string"),
+                "id": datasets.Value("string"),
+                "title": datasets.Value("string"),
+                "text": datasets.Value("string"),
+                "url": datasets.Value("string"),
+                "score": datasets.Value("int64"),
+                "author": datasets.Value("string"),
+                "date": datasets.Value("string"),
+                # These are the features of your dataset like images, labels ...
+            }
+        )
+
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,  # Here we define them above because they are different between the two configurations
+            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+            # supervised_keys=("sentence", "label"),
+            # Homepage of the dataset for documentation
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            # License for the dataset if available
+            license=_LICENSE,
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
+        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+        urls = _URLS[self.config.name]
+        data_dir = dl_manager.download_and_extract(urls)
+        print(data_dir, "is the data_dir")
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, "train.jsonl"),
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, "dev.jsonl"),
+                    "split": "dev",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                # These kwargs will be passed to _generate_examples
+                gen_kwargs={
+                    "filepath": os.path.join(data_dir, "test.jsonl"),
+                    "split": "test",
+                },
+            ),
+        ]
+
+    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    def _generate_examples(self, filepath, split):
+        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+        print(filepath, "is the filepath")
+        with open(filepath, encoding="utf-8") as f:
+            print(f, "is the file")
+            for key, row in enumerate(f):
+                data = json.loads(row)
+                print(data, "is the data")
+                if self.config.name == "first_domain":
+                    # Yields examples as (key, example) tuples
+                    yield key, {
+                        "subreddit": data["subreddit"],
+                        "id": data["id"],
+                        "title": data["title"],
+                        "text": data["text"],
+                        "url": data["url"],
+                        "score": data["score"],
+                        "author": data["author"],
+                        "date": data["date"],
+                    }
+                else:
+                    yield key, {
+                        "subreddit": data["subreddit"],
+                        "id": data["id"],
+                        "title": data["title"],
+                        "text": data["text"],
+                        "url": data["url"],
+                        "score": data["score"],
+                        "author": data["author"],
+                        "date": data["date"],
+                    }
+
+    # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    # def _generate_examples(self, filepath, split):
+    #     # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+    #     # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+    #     print(filepath, "is the filepath")
+    #     with open(filepath, encoding="utf-8") as f:
+    #         print(f, "is the file")
+    #         for key, row in enumerate(f):
+    #             data = json.loads(row)
+    #             print(data, "is the data")
+    #             if self.config.name == "first_domain":
+    #                 # Yields examples as (key, example) tuples
+    #                 yield key, {
+    #                     "subreddit": data["subreddit"],
+    #                     "id": data["id"],
+    #                     "title": data["title"],
+    #                     "text": data["text"],
+    #                     "url": data["url"],
+    #                     "score": data["score"],
+    #                     "author": data["author"],
+    #                     "date": data["date"],
+    #                 }
+    #             else:
+    #                 yield key, {
+    #                     "subreddit": data["subreddit"],
+    #                     "id": data["id"],
+    #                     "title": data["title"],
+    #                     "text": data["text"],
+    #                     "url": data["url"],
+    #                     "score": data["score"],
+    #                     "author": data["author"],
+    #                     "date": data["date"],
+    #                 }
+
+    # # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+    # def _generate_examples(self, filepath, split):
+    #     # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+    #     # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+    #     with open(filepath, encoding="utf-8") as f:
+    #         for key, row in enumerate(f):
+    #             data = json.loads(row)
+    #             if self.config.name == "first_domain":
+    #                 # Yields examples as (key, example) tuples
+    #                 yield key, {
+    #                     "sentence": data["sentence"],
+    #                     "option1": data["option1"],
+    #                     "answer": "" if split == "test" else data["answer"],
+    #                 }
+    #             else:
+    #                 yield key, {
+    #                     "sentence": data["sentence"],
+    #                     "option2": data["option2"],
+    #                     "second_domain_answer": (
+    #                         "" if split == "test" else data["second_domain_answer"]
+    #                     ),
+    #                 }
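
For reference, a quick local smoke test of the loader (a sketch, not part of this commit): note that the `_URLS` entries point at `blob/main/` HTML pages, whereas raw files are served from `resolve/main/`, and `trust_remote_code=True` is required by `datasets` >= 2.16 to run a script like this.

    # Hypothetical smoke test: run the script above through the datasets library.
    from datasets import load_dataset

    # "train" selects the BuilderConfig of that name defined in the script.
    ds = load_dataset("./reddit_dataset_load.py", "train", trust_remote_code=True)
    print(ds)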