#!/usr/bin/env python3
#
# Simple script to download the StackExchange archive XML files with posts (threaded version).
#
# Note: you probably want to download stackoverflow.com-Posts.7z manually, as it is 18 GB
# and takes a long time to download. You can try using a torrent client instead:
#
#   webtorrent https://archive.org/download/stackexchange/stackexchange_archive.torrent --select 658
#
import concurrent.futures
import os
import re

import requests
from bs4 import BeautifulSoup as bs

base_url = "https://ia600107.us.archive.org/view_archive.php?archive=/27/items/stackexchange/{0}&file=Posts.xml"
DOWNLOAD_DIR = "xml/"
NUM_PARALLEL = 20
RE_IGNORE = r"_meta|stackoverflow\.com\-"


def get_all_filenames():
    """
    Retrieve all Posts.xml URLs from the StackExchange archive.
    This needs quite some mangling because of special cases.
    """
    response = requests.get("https://archive.org/download/stackexchange")
    # Fail loudly instead of silently returning None when the index page
    # cannot be fetched.
    response.raise_for_status()
    soup = bs(response.content, "html.parser")
    table = soup.find("table")
    link_tags = table.find_all("a")

    # stackoverflow.com is a special case: it is excluded by RE_IGNORE below
    # and pointed directly at the full 7z archive instead.
    urls = {
        "stackoverflow": "https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z"
    }
    for link in link_tags:
        url = link["href"]
        # Derive a dataset name from the archive filename, e.g.
        # "ai.stackexchange.com.7z" -> "ai".
        name = url.split(".stackexchange")[0].replace(".", "_").replace("-", "_")
        name = name.replace("_com_7z", "")

        if url.endswith("7z") and not re.search(RE_IGNORE, url):
            urls[name] = base_url.format(url)
    return urls


def download_url(dataset_name: str, url: str):
    cache_path = os.path.join(DOWNLOAD_DIR, dataset_name + ".xml")
    if os.path.exists(cache_path):
        print("Using cached:", cache_path)
        return cache_path

    print("Downloading xml:", dataset_name)
    response = requests.get(url)
    print("Finished downloading:", dataset_name)
    with open(cache_path, "wb") as f:
        f.write(response.content)
    return cache_path


urls = get_all_filenames()

# Create the download directory once, up front, rather than inside
# download_url, to avoid a mkdir race between worker threads.
os.makedirs(DOWNLOAD_DIR, exist_ok=True)

with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_PARALLEL) as executor:
    futures = [
        executor.submit(download_url, dataset, url)
        for dataset, url in urls.items()
    ]
    # Wait for all downloads to complete.
    concurrent.futures.wait(futures)

print("All downloads complete")
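

# The helper below is a minimal sketch of how a downloaded Posts.xml could be
# consumed afterwards; it is not part of the download logic above. It assumes
# the standard StackExchange dump layout (a single <posts> root whose <row>
# children carry post data as attributes such as "Id" and "PostTypeId").
# `count_questions` and the example path are hypothetical, for illustration only.

import xml.etree.ElementTree as ET


def count_questions(path: str) -> int:
    """Stream-parse a Posts.xml dump and count question rows (PostTypeId == "1")."""
    count = 0
    # iterparse streams the file instead of loading it wholesale, which matters
    # because some of these dumps are multiple gigabytes.
    for _, elem in ET.iterparse(path, events=("end",)):
        if elem.tag == "row" and elem.get("PostTypeId") == "1":
            count += 1
        elem.clear()  # release the element's children to keep memory flat
    return count


# Example usage (hypothetical dataset name):
# print(count_questions(os.path.join(DOWNLOAD_DIR, "ai.xml")))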