"""Filter images from a JSON dump of users and their posted images by
like/retweet thresholds, then download them concurrently."""

import argparse
import json
import os
from concurrent.futures import ThreadPoolExecutor

import requests

def download(url, path):
    """Fetch a single image and write it to disk."""
    print(f'Downloading {url}...')
    # A timeout keeps a stalled request from hanging a worker thread forever;
    # 30 seconds is an arbitrary but reasonable default.
    response = requests.get(url, timeout=30)
    response.raise_for_status()  # fail loudly instead of saving an error page
    with open(path, 'wb') as f:
        f.write(response.content)

def download_all(urls, path, thread_count):
    """Download every URL into `path` using a pool of worker threads."""
    futures = []
    with ThreadPoolExecutor(max_workers=thread_count) as executor:
        for url in urls:
            # Name each file after the last path segment of its URL.
            filename = url.split('/')[-1]
            futures.append(executor.submit(download, url, os.path.join(path, filename)))

    # submit() swallows worker exceptions unless the future is inspected,
    # so check each one instead of failing silently.
    failed = sum(1 for future in futures if future.exception() is not None)

    print(f'Downloaded {len(urls) - failed} of {len(urls)} images to {path}')

def main(input_path, output_path, thread_count, min_fav, min_retweet):
    print(f'Reading from {input_path}...')
    with open(input_path, 'r', encoding='utf-8') as input_file:
        users_and_images = json.load(input_file)

    # Keep only the images that clear both engagement thresholds.
    image_urls = []
    for user in users_and_images:
        for image in user['images']:
            if image['likes'] >= min_fav and image['retweets'] >= min_retweet:
                image_urls.append(image['url'])

    print(f'Found {len(image_urls)} images with at least {min_fav} likes and {min_retweet} retweets')

    # Create the output directory up front so the worker threads can open
    # files inside it.
    os.makedirs(output_path, exist_ok=True)

    print(f'\nDownloading images to {output_path}...')
    download_all(image_urls, output_path, thread_count)

    print('Done!')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # nargs='?' makes the positionals optional; argparse ignores `default`
    # on a required positional argument.
    parser.add_argument('input_path', type=str, nargs='?', default='data.json')
    parser.add_argument('output_path', type=str, nargs='?', default='images')
    parser.add_argument('--thread_count', type=int, default=4)
    parser.add_argument('--min_fav', type=int, default=0)
    parser.add_argument('--min_retweet', type=int, default=0)
    args = parser.parse_args()

    main(args.input_path, args.output_path, args.thread_count, args.min_fav, args.min_retweet)
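# Usage sketch. The invocation below is illustrative (the script's filename
# is whatever this file is saved as), and the data.json shape is inferred
# from how main() indexes the data, not from a documented schema:
#
#   $ python download_images.py data.json images --thread_count 8 --min_fav 100
#
#   # data.json (roughly):
#   # [
#   #   {"images": [{"url": "https://...", "likes": 120, "retweets": 15}]}
#   # ]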