import concurrent.futures
import os
import re

import requests
from bs4 import BeautifulSoup as bs
import pandas as pd

# Template that serves Posts.xml from inside a site's .7z archive via
# archive.org's view_archive endpoint, so nothing has to be extracted locally.
base_url = "https://ia600107.us.archive.org/view_archive.php?archive=/27/items/stackexchange/{0}&file=Posts.xml"

DOWNLOAD_DIR = "xml/"
NUM_PARALLEL = 20  # number of concurrent download threads

# Archive filenames matching this pattern are skipped.
RE_IGNORE = r"_meta|stackoverflow\.com\-"

def get_all_filenames():
    """
    Retrieve the Posts.xml URL for every site listed in the stackexchange archive.
    This needs a fair amount of mangling because of special cases.
    """
    response = requests.get("https://archive.org/download/stackexchange")
    response.raise_for_status()
    soup = bs(response.content, "html.parser")
    table = soup.find("table")
    link_tags = table.find_all("a")
    # stackoverflow.com itself is split into per-file archives, so its Posts
    # dump is linked directly instead of going through the view_archive endpoint.
    urls = {
        "stackoverflow": "https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z"
    }
    for link in link_tags:
        url = link["href"]
        # e.g. "english.stackexchange.com.7z" -> "english"
        name = url.split(".stackexchange")[0].replace(".", "_").replace("-", "_")
        name = name.replace("_com_7z", "")
        if url.endswith("7z") and not re.search(RE_IGNORE, url):
            urls[name] = base_url.format(url)
    return urls


urls = get_all_filenames()
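# `urls` now maps a short site name to a Posts.xml URL, e.g. (illustrative)
# "english" -> base_url.format("english.stackexchange.com.7z").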


def download_url(dataset_name: str, url: str):
    # exist_ok avoids a race when several worker threads create the directory at once
    os.makedirs(DOWNLOAD_DIR, exist_ok=True)
    cache_path = os.path.join(DOWNLOAD_DIR, dataset_name + ".xml")
    if os.path.exists(cache_path):
        print("Using cached: ", cache_path)
        return cache_path
    else:
        print("Downloading xml: ", dataset_name)
        response = requests.get(url)
        print("Finished downloading: ", dataset_name)
        with open(cache_path, "wb") as f:
            f.write(response.content)
        return cache_path


# Download all Posts.xml files in parallel.
with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_PARALLEL) as executor:
    futures = [
        executor.submit(download_url, dataset, url) for dataset, url in urls.items()
    ]

# Exiting the `with` block above already waits for the workers to finish;
# the explicit wait is kept for clarity.
concurrent.futures.wait(futures)

print("All downloads complete")