donfu committed
Commit 3c8602c
1 Parent(s): 894a91e

Add initial scripts
Files changed (3)
  1. .gitignore +2 -0
  2. dl-stackexchange.py +73 -0
  3. process.py +308 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ xml/
+ *.pyc
dl-stackexchange.py ADDED
@@ -0,0 +1,73 @@
+ #!/usr/bin/env python3
+ #
+ # Simple script to download StackExchange archive XML files with posts (threaded version)
+ #
+ # Note: you probably want to download stackoverflow.com-Posts.7z manually, as it is 18GB
+ # and takes a long time to download. You can try using torrent:
+ #
+ # webtorrent https://archive.org/download/stackexchange/stackexchange_archive.torrent --select 658
+ #
+
+ import requests
+ import concurrent.futures
+ import os
+ from bs4 import BeautifulSoup as bs
+ import pandas as pd
+ import re
+
+ base_url = "https://ia600107.us.archive.org/view_archive.php?archive=/27/items/stackexchange/{0}&file=Posts.xml"
+ DOWNLOAD_DIR = "xml/"
+ NUM_PARALLEL = 20
+ RE_IGNORE = r"_meta|stackoverflow\.com\-"
+
+
+ def get_all_filenames():
+     """
+     Retrieve all urls from stackexchange archive.
+     This needs quite some mangling because of special cases.
+     """
+     response = requests.get("https://archive.org/download/stackexchange")
+     if response.ok:
+         soup = bs(response.content, "html.parser")
+         table = soup.find("table")
+         link_tags = table.find_all("a")
+         urls = {
+             "stackoverflow": "https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z"
+         }
+         for link in link_tags:
+             url = link["href"]
+             name = url.split(".stackexchange")[0].replace(".", "_").replace("-", "_")
+             name = name.replace("_com_7z", "")
+             if url.endswith("7z") and not re.search(RE_IGNORE, url):
+                 urls[name] = base_url.format(url)
+         return urls
+
+
+ urls = get_all_filenames()
+
+
+ def download_url(dataset_name: str, url: str):
+     if not os.path.exists(DOWNLOAD_DIR):
+         os.mkdir(DOWNLOAD_DIR)
+     cache_path = os.path.join(DOWNLOAD_DIR, dataset_name + ".xml")
+     if os.path.exists(cache_path):
+         print("Using cached: ", cache_path)
+         return cache_path
+     else:
+         print("Downloading xml: ", dataset_name)
+         response = requests.get(url)
+         print("Finished downloading: ", dataset_name)
+         with open(cache_path, "wb") as f:
+             f.write(response.content)
+         return cache_path
+
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_PARALLEL) as executor:
+     futures = [
+         executor.submit(download_url, dataset, url) for dataset, url in urls.items()
+     ]
+
+ # Wait for all downloads to complete
+ concurrent.futures.wait(futures)
+
+ print("All downloads complete")
process.py ADDED
@@ -0,0 +1,308 @@
+ #!/usr/bin/env python3
+ # Simple script to convert StackExchange XML to Open Assistant format
+ # Original code by https://github.com/b-mc2
+
+ from bs4 import BeautifulSoup as bs
+ import pandas as pd
+ import os
+ import glob
+ import sys
+ from html2text import html2text
+ from datasets import load_dataset
+
+ CACHE_DIR = "xml/"
+ SOURCE = "stackexchange-{0}"
+ MAX_ANSWERS = 10
+ QUESTION_SCORE_TRESHOLD = 0
+ ANSWER_SCORE_TRESHOLD = 0
+ HF_DATASET = "donfu/oa-stackexchange"
+
+ xml_format_map = {
+     "Id": int,
+     "PostTypeId": int,
+     "CreationDate": str,
+     "Score": int,
+     "ViewCount": int,
+     "Body": str,
+     "AnswerCount": int,
+     "CommentCount": int,
+     "ContentLicense": str,
+     "AcceptedAnswerId": int,
+     "ParentId": int,
+ }
+
+
+ def main():
+     datasets = sys.argv[1:] if len(sys.argv) > 1 else list_cached_datasets()
+     for dataset in datasets:
+         process_dataset(dataset)
+
+
+ def list_cached_datasets():
+     xml_files = glob.glob(f"{CACHE_DIR}/*.xml")
+     datasets = [os.path.splitext(os.path.basename(file))[0] for file in xml_files]
+     datasets.sort()
+     return datasets
+
+
+ def process_dataset(dataset):
+     xml_file = f"{CACHE_DIR}/{dataset}.xml"
+     source = SOURCE.format(dataset)
+     if os.path.exists(xml_file):
+         df = xml_to_df(xml_file, source)
+         # df = filter_only_questions_with_accepted_answers(df)
+         # df = filter_scores_above(df, QUESTION_SCORE_TRESHOLD, ANSWER_SCORE_TRESHOLD)
+         # df = clean_tags(df)
+         # df = convert_html_to_markdown(df)
+         # df = group_qa(df)
+         oa = convert_to_oa(df)
+         save_parquet(oa, dataset)
+         # upload_hf(dataset)
+     else:
+         print(f"XML file {xml_file} not found, please download first. Skipping...")
+
+
+ def convert_to_oa(all):
+     """
+     Convert dataframe to Open Assistant format with INSTRUCTION, RESPONSE, SOURCE, METADATA columns
+
+     Only include questions with an AcceptedAnswerId
+     """
+     create_metadata = lambda row: {
+         "tags": row["Tags_q"]
+         .replace("-", " ")
+         .replace("><", ", ")
+         .replace("<", "")
+         .replace(">", "")
+         if isinstance(row["Tags_q"], str)
+         else "",
+         "score": row["Score_q"],
+         "views": row["ViewCount_q"],
+     }
+     questions = all[all["AcceptedAnswerId"] != 0]
+     merged = pd.merge(
+         questions,
+         all,
+         how="left",
+         left_on="AcceptedAnswerId",
+         right_on="Id",
+         suffixes=("_q", "_a"),
+     )
+     merged["INSTRUCTION"] = (
+         merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
+     )
+     merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
+     merged["SOURCE"] = merged["DataSource_q"]
+     merged["METADATA"] = merged.apply(create_metadata, axis=1)
+
+     return merged[["INSTRUCTION", "RESPONSE", "SOURCE", "METADATA"]]
+
+
+ def save_parquet(df, dataset):
+     """
+     Save Dataframe to Parquet. See here for specs:
+     https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
+     """
+     parquet_file = f"{dataset}.parquet"
+     df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
+     print("Converted data into parquet format: " + parquet_file)
+
+
+ def upload_hf(dataset):
+     """
+     Upload to Hugging Face
+     """
+     parquet_file = f"{dataset}.parquet"
+     dataset = load_dataset("parquet", data_files=parquet_file, name=dataset)
+     dataset.push_to_hub(HF_DATASET, max_shard_size="500MB")
+     print("Uploaded to Hugging Face: " + HF_DATASET)
+
+
+ def xml_to_df(path: str, source: str):
+     """
+     Collect and manually import XML into Dataframe
+
+     pd.read_xml() errors when XML trees are too large, this is just a hack to
+     read an XML file and parse it into a Dataframe. **Not tested on huge XML files**
+
+     Parameters:
+         path (str): Path to the XML file with the posts of one StackExchange site
+
+     Returns:
+         df (DataFrame): A Dataframe from the XML file
+     """
+     with open(path, "rb") as f:
+         soup = bs(f, "xml")
+     posts = soup.find_all("row")
+
+     all_posts = [post.attrs for post in posts]
+
+     df = pd.DataFrame(all_posts)
+     df.AnswerCount.fillna(0, inplace=True)
+     df.ViewCount.fillna(0, inplace=True)
+     df.AcceptedAnswerId.fillna(0, inplace=True)
+     df.ParentId.fillna(0, inplace=True)
+     df["DataSource"] = source
+     df = df.astype(xml_format_map)
+     return df
+
+
+ def filter_only_questions_with_accepted_answers(df):
+     """
+     Filter only to Questions with Accepted Answers
+
+     Filter dataframe by questions that have accepted answers; also includes
+     all rows of answers for those questions, even if not accepted.
+
+     Parameters:
+         df (DataFrame): containing "AcceptedAnswerId", "Id", and "ParentId" columns
+
+     Returns:
+         df (DataFrame): current dataframe with filtered results
+     """
+     accepted_ids = df[df["AcceptedAnswerId"] != 0]["Id"].tolist()
+     return df[(df["AcceptedAnswerId"] != 0) | (df["ParentId"].isin(accepted_ids))]
+
+
+ def filter_scores_above(
+     df, question_score_threshold: int = 20, answer_score_threshold: int = 20
+ ):
+     """
+     Filter Dataframe by minimum scores
+
+     Filter question and answer rows by score thresholds to trim lower scoring results
+
+     Parameters:
+         df (DataFrame): containing a "Score" column
+
+     Returns:
+         df (DataFrame): current dataframe with filtered results
+     """
+     return df[
+         ((df["Score"] >= question_score_threshold) & (df.PostTypeId == 1))
+         | ((df["Score"] >= answer_score_threshold) & (df.PostTypeId == 2))
+     ]
+
+
+ to_markdown = (
+     lambda row: html2text(row, bodywidth=0).strip() if isinstance(row, str) else ""
+ )
+
+
+ def convert_html_to_markdown(df, column: str = "Body"):
+     """
+     Convert HTML tags to markdown
+
+     Feeds the HTML text body through html2text and strips surrounding whitespace
+
+     Parameters:
+         df (DataFrame): containing a "Body" column with HTML
+
+     Returns:
+         df (DataFrame): current dataframe with parsed column
+     """
+     df.dropna(subset=[column], inplace=True)
+     df[f"{column}Clean"] = df[column].apply(to_markdown)
+     return df
+
+
+ def clean_tags(df):
+     """
+     Convert tags into a comma-separated list
+
+     Converts tag slugs into comma-separated tags
+
+     Parameters:
+         df (DataFrame): containing a "Tags" column with slugs
+
+     Returns:
+         df (DataFrame): current dataframe with parsed column
+     """
+     df["TagsClean"] = (
+         df["Tags"]
+         .str.replace("-", " ")
+         .str.replace("><", ", ")
+         .str.replace("<", "")
+         .str.replace(">", "")
+     )
+     return df
+
+
+ def group_qa(df):
+     """
+     Group Questions and Answers
+     """
+     questions = df[df.PostTypeId == 1]
+     answers = df[df.PostTypeId == 2]
+
+     df = pd.merge(
+         questions,
+         answers[
+             [
+                 "Id",
+                 "CreationDate",
+                 "Score",
+                 "ViewCount",
+                 "CommentCount",
+                 "ContentLicense",
+                 "TagsClean",
+                 "BodyClean",
+                 "ParentId",
+             ]
+         ],
+         left_on="Id",
+         right_on="ParentId",
+         suffixes=("_q", "_a"),
+         how="left",
+     )
+
+     df["AcceptedAnswerFlag"] = df.apply(
+         lambda row: row["Id_a"] == row["AcceptedAnswerId"], axis=1
+     )
+
+     df = df.rename(
+         columns={
+             "BodyClean_q": "Question",
+             "Score_q": "QuestionScore",
+             "TagsClean_q": "QuestionTags",
+             "BodyClean_a": "Answer",
+             "Score_a": "AnswerScore",
+             "ContentLicense_q": "QuestionContentLicense",
+             "ContentLicense_a": "AnswerContentLicense",
+             "CreationDate_q": "CreationDate",
+         }
+     )
+
+     df = (
+         df.sort_values(
+             by=["AcceptedAnswerFlag", "AnswerScore"], ascending=[False, False]
+         )
+         .groupby("Question")
+         .head(MAX_ANSWERS)
+         .reset_index(drop=True)
+     )
+     df = (
+         df.groupby(
+             [
+                 "Title",
+                 "Question",
+                 "QuestionScore",
+                 "QuestionTags",
+                 "QuestionContentLicense",
+                 "DataSource",
+                 "CreationDate",
+             ]
+         )
+         .apply(
+             lambda x: x[["Answer", "AnswerScore", "AcceptedAnswerFlag"]].to_dict(
+                 "records"
+             )
+         )
+         .reset_index()
+         .rename(columns={0: "Answers"})
+     )
+     return df
+
+
+ if __name__ == "__main__":
+     main()
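
As an aside, a minimal sketch (not part of the diff above) of how the parquet produced by save_parquet() could be inspected; the file name ai.parquet is only an example of the output process.py would write for a dataset called "ai":

import pandas as pd

# Each row carries the four Open Assistant columns produced by convert_to_oa()
df = pd.read_parquet("ai.parquet")  # example output of process.py for one site
print(df.columns.tolist())          # ['INSTRUCTION', 'RESPONSE', 'SOURCE', 'METADATA']
print(df.iloc[0]["INSTRUCTION"][:200])
print(df.iloc[0]["METADATA"])       # {'tags': ..., 'score': ..., 'views': ...}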