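"""Scraper for cosppi.net ranking pages.

Collects user profile URLs for a given sort order, then gathers each
user's image URLs with like/retweet counts and writes everything to a
JSON file.
"""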
import requests
from bs4 import BeautifulSoup
import json
import time
from concurrent.futures import ThreadPoolExecutor
import argparse

BASE = "https://cosppi.net"
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:108.0) Gecko/20100101 Firefox/108.0"
RETRY_MAX = 3
TIMEOUT = 20
SORTS = ["all-rank", "china-rank", "new-add", "follower-rank"]


def process_url(url):
    if url.startswith("//"):
        return f"https:{url}"
    elif url.startswith("/"):
        return f"{BASE}{url}"
    else:
        return url


def get_user_urls(debug, sort, page):
    # Fetch one ranking page and return the profile URLs found on it,
    # or None when the page does not exist (end of the ranking).
    found_urls = []
    url = f"{BASE}/sort/{sort}/page/{page}"
    print(f"Fetching {url}...")
    response = None
    for i in range(RETRY_MAX):
        try:
            response = requests.get(
                url,
                headers={"User-Agent": USER_AGENT},
                timeout=TIMEOUT,
            )
            response.raise_for_status()
            break
        except requests.exceptions.RequestException as e:
            print(f"Error: {e}")
            if i < RETRY_MAX - 1:
                print("Retrying after 3 seconds...")
                time.sleep(3)
            else:
                print("Max retries reached.")
    if response is None or response.status_code == 404 or (debug and page >= 2):
        print(f"Page {page} not found!")
        return None
    print(f"Fetched page {page}")
    soup = BeautifulSoup(response.text, "html.parser")
    main_section = soup.find("main")
    if main_section is None:
        return None
    users = main_section.find_all("a", {"class": "sort_prof_link"})
    if not users:
        return None
    for user in users:
        raw_url = user.get("href")
        processed_url = process_url(raw_url)
        found_urls.append(processed_url)
        print(f"User URL found: {processed_url}")
    return found_urls


def get_all_user_urls(debug, sort, thread_count):
    pages = range(1, 50)
    with ThreadPoolExecutor(max_workers=thread_count) as executor:
        user_urls = executor.map(get_user_urls, [debug] * len(pages), [sort] * len(pages), pages)
        # remove None and flatten list
        user_urls = [url for sublist in user_urls if sublist for url in sublist]
    print(f"\nTotal number of user pages: {len(user_urls)}\n")
    return user_urls


def get_user_images(user_url):
    # Walk through a user's paginated gallery and collect image URLs
    # together with their like/retweet counts.
    page = 1
    images = []
    while True:
        response = requests.get(
            f"{user_url}/page/{page}",
            headers={"User-Agent": USER_AGENT},
            timeout=TIMEOUT,
        )
        if response.status_code == 404:
            print(f"User {user_url} not found!")
            break
        print(f"Fetching user images {user_url}...")
        soup = BeautifulSoup(response.text, "html.parser")
        img_wrapper_outer = soup.find("div", {"class": "img_wrapper_outer"})
        if img_wrapper_outer is None:
            print(f"User {user_url} has no images!")
            break
        image_wrappers = img_wrapper_outer.find_all("div", {"class": "img_wrapper"})
        if not image_wrappers:
            print(f"User {user_url} has no images!")
            break
        print(f"Found {len(image_wrappers)} images on page {page}...")
        for image_wrapper in image_wrappers:
            url = image_wrapper.find("img").get("data-src")
            # remove last ":small" or ":large" size suffix if present
            if url.rsplit(":", 1)[-1] in ("small", "large"):
                url = url.rsplit(":", 1)[0]
            span_tags = image_wrapper.find("div", {"class": "img_num"}).find_all("span")
            [likes, retweets] = [int(span_tag.text) for span_tag in span_tags]
            image_item = {
                "url": url,
                "likes": likes,
                "retweets": retweets,
            }
            images.append(image_item)
        page += 1
    print(f"Images found in article {user_url}: {len(images)}")
    username = user_url.split("/")[-1]
    return {
        "username": username,
        "images": images,
    }


def get_image_urls(user_urls, thread_count):
    with ThreadPoolExecutor(max_workers=thread_count) as executor:
        results = executor.map(get_user_images, user_urls)
        return list(results)


def main(debug, thread_count, output_path, sort):
    user_urls = get_all_user_urls(debug, sort, thread_count)
    print(f"\nFetching images from {len(user_urls)} user pages...")
    print(user_urls)
    users_and_images = get_image_urls(user_urls, thread_count)
    print(f"\nTotal number of users: {len(users_and_images)}")
    num_images = 0
    for user in users_and_images:
        num_images += len(user["images"])
    print(f"Total number of images: {num_images}")
    # preview the first three users (skip image details for users without images)
    print("\nPreview:")
    for user in users_and_images[:3]:
        print(f"Username: {user['username']}")
        print(f"Number of images: {len(user['images'])}")
        if user["images"]:
            print(f"First image: {user['images'][0]['url']}")
            print(f"Last image: {user['images'][-1]['url']}")
        print()
    print(f"\nWriting to {output_path}...")
    with open(output_path, "w", encoding="utf-8") as output_file:
        json.dump(users_and_images, output_file, indent=4)
    print("Done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true", help="Enable debug mode (only fetches the first ranking page)")
    parser.add_argument("--threads", type=int, default=5, help="Number of threads to use for parallel processing")
    parser.add_argument("--output", type=str, default="output.json", help="Output file name for JSON data")
    parser.add_argument("--sort", type=str, default="all-rank", choices=SORTS, help="Sort type")
    args = parser.parse_args()
    main(args.debug, args.threads, args.output, args.sort)
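# Example invocation (the script filename here is illustrative, not part of the source):
#   python cosppi_scraper.py --debug --threads 8 --sort follower-rank --output followers.json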