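"""Crawler for original classical Chinese texts on so.gushiwen.cn.

Walks the four category pages, then every book, section and chapter, saving
one cleaned text.txt per chapter and logging progress so that an interrupted
run can resume where it stopped.
"""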
import os
import re
import time

import requests
from bs4 import BeautifulSoup


def clear_Data(text):
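    """Strip all whitespace (spaces, tabs, newlines) from a scraped paragraph."""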
    text = re.sub(r'\s', '', text)
    return text


def chapter(html, header, dir_name):
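    """Download one chapter page and write its cleaned paragraphs to text.txt.

    html     -- URL of the chapter page on so.gushiwen.cn
    header   -- HTTP headers (User-Agent) for requests
    dir_name -- directory that receives text.txt
    """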
    request = requests.get(url=html, headers=header)
    time.sleep(0.5)  # throttle to stay polite to the server

    bs = BeautifulSoup(request.text, 'lxml')

    h1_nodes = bs.select("body > div.main3 > div.left > div.sons > div > h1")
    # Pages without this <h1> carry no chapter text; skip them.
    if len(h1_nodes) == 0:
        return
    # The h1 markup reveals which of the three page layouts this chapter uses.
    type2 = str(h1_nodes[0])
    ori = None
    if len(re.findall(r':ShowYizhuYuanchuang\(', type2)) > 0:
        # Lazy-loaded layout: the original text comes from an ajax endpoint.
        book_id = re.findall('bookv_(.*).aspx', html)[0]
        baseurl = "https://so.gushiwen.cn/guwen/ajaxbfanyiYuanchuang.aspx?id=" + book_id + "&state=duanyi"

        request2 = requests.get(url=baseurl, headers=header)
        time.sleep(0.5)

        bs2 = BeautifulSoup(request2.text, 'lxml')
        content = str(bs2.select("body > div.contson")[0])
        ori = re.findall(r'<p>(.*?)<br/>', content)
    elif len(re.findall(r':ShowYizhu\(', type2)) > 0:
        # Annotated layout: paragraphs sit directly in the cont block.
        ori = str(bs.select('body > div.main3 > div.left > div > div.cont > div')[0])
        ori = re.findall(r'<p>(.*?)</p>', ori)
    else:
        # Plain layout: paragraphs live under the #left0 container.
        content = str(bs.select("#left0 > div.sons > div.cont > div")[0])
        ori = re.findall(r'<p>(.*?)</p>', content)

    # Write one cleaned paragraph per line.
    f1 = open(os.path.join(dir_name, "text.txt"), "w", encoding="utf-8")
    for item in ori:
        item = clear_Data(item) + '\n'
        f1.write(item)
    f1.close()


def book(baseurl, header, bookName, lastInfo, flog):
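    """Crawl every section and chapter of one book into bookName/.

    lastInfo is the (book, section, chapter) tuple from readLog(), or None;
    section and chapter markers are appended to flog so an interrupted run
    can resume where it stopped.
    """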
    lastSection = lastInfo[1] if isinstance(lastInfo, tuple) else None
    lastChap = lastInfo[2] if isinstance(lastInfo, tuple) else None

    request = requests.get(url=baseurl, headers=header)
    time.sleep(0.5)
    bs = BeautifulSoup(request.text, 'lxml')

    chap_divs = bs.select("body > div.main3 > div.left > div.sons > div")
    chap_num = len(chap_divs)
    # flag stays True while we are still skipping ahead to the resume point.
    flag = True

    for i in range(chap_num):
        chap_detail_list = str(chap_divs[i])
        t_bookName = bookName
        if chap_num > 1:
            # Multi-section books get one sub-directory per section.
            sectionName = re.findall(r'<strong>(.*)</strong>', chap_detail_list)[0]
            if lastSection is not None and lastSection != "" and lastSection != sectionName and flag:
                continue
            flag = False
            t_bookName = os.path.join(bookName, sectionName)
            if not os.path.exists(t_bookName):
                os.mkdir(t_bookName)
            flog.write('###' + sectionName + '###\n')
            print(' Current section: ' + sectionName)
        chapID_chapName = dict(zip(re.findall('(https:.*aspx)', chap_detail_list),
                                   re.findall(r'aspx">(.*)</a>', chap_detail_list)))
        flag2 = True
        for html, name in chapID_chapName.items():
            # '/' is illegal in file names; swap it out before building paths.
            if '/' in name:
                name = name.replace('/', '&')
            if lastChap is not None and lastChap != "" and lastChap != name and flag2:
                continue
            flag2 = False
            if not os.path.exists(os.path.join(t_bookName, name)):
                os.mkdir(os.path.join(t_bookName, name))
            flog.write('##' + name + '##\n')
            print(' Current chapter: ' + name)
            chapter(html, header, os.path.join(t_bookName, name))
        # The chapter-level resume filter only applies to the first section.
        lastChap = None


def books(baseurl, header, lastInfo, flog, base_dir_name='crawl-data'):
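    """Crawl every book listed on one category page.

    Returns True when lastInfo's book was not found on this page (the whole
    page was skipped), so the caller keeps the resume state for the next
    category; returns False once crawling has actually resumed.
    """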
    lastBook = lastInfo[0] if isinstance(lastInfo, tuple) else None

    request = requests.get(url=baseurl, headers=header)
    time.sleep(0.5)
    bs = BeautifulSoup(request.text, 'lxml')

    book_list = str(bs.select("body > div.main3 > div.left > div.sons > div")[0])
    book_bookName = dict(zip(re.findall('href="(.*)" target=', book_list),
                             re.findall(r'_blank">(.*)</a>', book_list)))
    # flag stays True while we are still skipping ahead to the resume point.
    flag = True
    for bookurl, bookName in book_bookName.items():
        if lastBook is not None and bookName != lastBook and flag:
            continue
        flag = False
        url = "https://so.gushiwen.cn" + bookurl
        book_dir = os.path.join(base_dir_name, bookName)
        if not os.path.exists(book_dir):
            os.mkdir(book_dir)
        flog.write('####' + bookName + '####\n')
        print('Current book: ' + bookName)
        book(url, header, book_dir, lastInfo, flog)
        # Resume filtering only applies up to the first crawled book.
        lastInfo, lastBook = None, None

    return flag


def readLog():
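    """Parse the crawl log and return the (book, section, chapter) resume point.

    The log marks progress as ####book####, ###section### and ##chapter##;
    the last occurrence of each marker is where the previous run stopped.
    Returns None when no book marker exists yet.
    """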
    flog = open('log/crawl_src_log.txt', 'r', encoding="utf-8")
    log = flog.read()

    lastBook, lastSection, lastChap = "", "", ""

    if len(re.findall('####(.*)####', log)) > 0:
        lastBook = re.findall('####(.*)####', log)[-1]
    else:
        flog.close()
        return None

    # Narrow the search window to the last book, then to its last section.
    bookContent = log[log.find(lastBook):]
    if len(re.findall('###(.*)###', bookContent)) > 0:
        lastSection = re.findall('###(.*)###', bookContent)[-1]
        bookContent = bookContent[bookContent.find(lastSection):]

    if len(re.findall('##(.*)##', bookContent)) > 0:
        lastChap = re.findall('##(.*)##', bookContent)[-1]

    flog.close()
    return lastBook, lastSection, lastChap


def main():
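    """Crawl the four category pages, resuming from the logged position if a
    previous run was interrupted.
    """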
    header = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36"
    }
    # The four traditional bibliographic divisions listed on so.gushiwen.cn.
    urllist = [
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e7%bb%8f%e9%83%a8',  # 经部 (Classics)
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e5%8f%b2%e9%83%a8',  # 史部 (Histories)
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e5%ad%90%e9%83%a8',  # 子部 (Masters)
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e9%9b%86%e9%83%a8'   # 集部 (Collections)
    ]
    base_dir_name = '古文原文'  # output root, "original classical texts"
    lastInfo = None

    if not os.path.exists("log"):
        os.mkdir("log")

    # Resume from the previous run if a crawl log already exists.
    if os.path.exists('log/crawl_src_log.txt'):
        lastInfo = readLog()

    if not os.path.exists(base_dir_name):
        os.mkdir(base_dir_name)

    # Line-buffered so progress markers land on disk right after each write.
    f_log = open('log/crawl_src_log.txt', 'a', buffering=1, encoding='utf-8')
    for url in urllist:
        # Once the resume point has been passed, later categories start fresh.
        if not books(url, header, lastInfo, f_log, base_dir_name):
            lastInfo = None
    f_log.close()


if __name__ == '__main__':
    main()