qgyd2021's picture
[update] edit gitattributes
ead407c
raw
history blame
5.17 kB
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
import random
import re
import time
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
# Regex fragments matched against the raw HTML: product pages embed a
# JSON-LD-like blob whose fields are scraped positionally via groups.
# Groups: 1=name 2=brand 3=review 4=description 5=mpn 6=color 7=size
#         8=sku 9=priceCurrency 10=price 11=priceValidUntil
pattern_without_rating = (
    r'name":"(.*)","image":\[(?:.*)\],"brand":"(.*)","review":"(.*)",'
    r'"description":"(.*)","mpn":"(.*)","color":"(.*)","size":"(.*)",'
    r'"sku":"(.*?)".*,"offers":{"@type":"Offer","availability":"(?:.*)",'
    r'"priceCurrency":"(.*)","price":"(.*)","priceValidUntil":"(.*)","url'
)

# Optional aggregate-rating blob; groups: 1=ratingValue 2=reviewCount.
pattern_review = (
    r'aggregateRating":(?:.*)ratingValue":"(.*)","reviewCount":"(.*)"},"offers'
)

# NOTE(review): pattern_description is defined but never referenced in the
# code visible in this file — possibly leftover or used elsewhere.
pattern_description = r'<div class="des-title-div">.*<h2 class="des-title">'
def parse_text(text: str) -> dict:
    """Parse one lightinthebox.com product page (raw HTML) into a flat dict.

    :param text: full HTML source of a product page.
    :return: dict with title, brand, price, variants, rating, category, etc.
    :raises Exception: (AttributeError / IndexError / ...) when the page does
        not contain the expected product markup; the caller treats any
        exception as "not a product page, skip this id".
    """
    # --- structured product blob embedded in the page source ---
    match = re.search(pattern_without_rating, text, flags=re.IGNORECASE)
    title = match.group(1)
    brand = match.group(2)
    review = match.group(3)
    description = match.group(4)
    mpn = match.group(5)
    sku = match.group(8)
    price = match.group(10)

    # --- aggregate rating is optional: absent when a product has no reviews ---
    match = re.search(pattern_review, text, flags=re.IGNORECASE)
    if match is not None:
        rating_value = match.group(1)
        review_count = match.group(2)
    else:
        rating_value = None
        review_count = None

    soup = BeautifulSoup(text, features="html.parser")

    # --- free-text overview; stop at the "Size Chart"/"Photos" sections ---
    result = list()
    for item in soup.find_all(class_="description-item"):
        for line in item.text.strip().split("\n"):
            line = line.strip()
            if len(line) == 0:
                continue
            if line in ("Size Chart", "Photos"):
                break
            result.append(line)
    overview = "\n".join(result)

    # --- breadcrumb -> "a > b > c" category path ---
    result = list()
    for item in soup.find_all(class_="breadcrumbAB"):
        for line in item.text.strip().split("\n"):
            line = line.strip()
            if len(line) == 0:
                continue
            result.append(line)
    category = " > ".join("".join(result).split(">"))

    # --- colour / size variants from the attribute <select> lists ---
    # FIX: the previous version also read the "select-color-show" element into
    # `color`, but that value was unconditionally overwritten below; the dead
    # lookup only caused an AttributeError (page skipped) when the element was
    # missing, so it has been removed.
    color_size = [[], []]
    color_size_idx = None
    for select in soup.find_all("select", class_="attr-list-select"):
        for idx, option in enumerate(select.find_all("option")):
            option_text = option.text.strip()
            if idx == 0:
                # The first option is a placeholder naming the attribute.
                if option_text == "Color":
                    color_size_idx = 0
                elif option_text == "Size":
                    color_size_idx = 1
                else:
                    break  # unknown attribute list: ignore this <select>
                continue
            color_size[color_size_idx].append(option_text)
    color = color_size[0]
    size = color_size[1]

    row = {
        "title": title,
        "brand": brand,
        # an empty / all-whitespace review string is normalised to None
        "review": review.strip() if len(review.strip()) != 0 else None,
        "description": description,
        "mpn": mpn,
        # FIX: `price` was captured from the page but dropped before; emit it.
        "price": price,
        "color": color,
        "size": size,
        "sku": sku,
        "ratingValue": rating_value,
        "reviewCount": review_count,
        "overview": overview,
        "category": category,
        "url": "https://www.lightinthebox.com/en/p/_p{}.html".format(mpn),
    }
    return row
# Desktop-browser request headers: without a realistic User-Agent the site
# tends to block or alter responses for the default python-requests client.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
}
# Resume support: collect the product ids (mpn) already scraped into the
# output file so they are skipped on this run.
finished_mpn = set()
output_file = "product.jsonl"
try:
    with open(output_file, "r", encoding="utf-8") as f:
        for line in f:
            row = json.loads(line.strip())
            # FIX: mpn is stored as a string in the JSONL, but the crawl loop
            # tests an *integer* counter against this set, so the resume check
            # never matched; normalise to int here.
            try:
                finished_mpn.add(int(row["mpn"]))
            except (TypeError, ValueError):
                finished_mpn.add(row["mpn"])
except FileNotFoundError:
    # FIX: first run — the output file does not exist yet (it is only
    # created by the append below); previously this crashed the script.
    pass
print("finished count: {}".format(len(finished_mpn)))
# Sweep candidate product ids sequentially; every page that parses as a
# product is appended to the JSONL output immediately (crash-safe resume).
sleep_time = 1  # back-off in seconds; grows linearly on consecutive failures
session = requests.Session()  # FIX: reuse one connection pool across requests
# FIX: open the output file once instead of reopening it for every row.
with open(output_file, "a", encoding="utf-8") as f:
    for mpn in tqdm(range(9156603, 9999999)):
        if mpn in finished_mpn:
            continue
        finished_mpn.add(mpn)
        url = "https://www.lightinthebox.com/en/p/_p{}.html".format(mpn)
        print("url: {}".format(url))
        try:
            resp = session.get(url, headers=headers, timeout=2)
        except Exception:
            # Network hiccup / timeout: back off a little longer each time.
            print("sleep: {}".format(sleep_time))
            time.sleep(sleep_time)
            sleep_time += 1
            continue
        try:
            row = parse_text(resp.text)
        except Exception:
            # Page did not match the expected product markup (404 etc.): skip.
            continue
        sleep_time = 1
        line = json.dumps(row, ensure_ascii=False)
        f.write("{}\n".format(line))
        f.flush()  # flush per row so a crash loses at most the current item
        print(line)
# No-op entry-point guard: the whole crawl above already runs at import time,
# so this exists only as a conventional end-of-script marker.
if __name__ == '__main__':
    pass