from bs4 import BeautifulSoup
import re
import requests
import time
import os
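
# Scraper for classical Chinese texts and their modern Chinese translations on
# so.gushiwen.cn. For every book/section/chapter it writes three files into a
# per-chapter directory: src.txt (original text), tgt.txt (translation) and
# 数据来源.txt (reference notes). Progress is appended to log/crawl_log.txt so
# an interrupted crawl can be resumed.
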
def write_files(file_name1, file_name2, dic):
    """Write the {source: translation} pairs in `dic` to two parallel files."""
    with open(file_name1, "w", encoding="utf-8") as f1, \
         open(file_name2, "w", encoding="utf-8") as f2:
        for one, two in dic.items():
            f1.write(clear_Data(one) + '\n')
            f2.write(clear_Data(two) + '\n')

def write_file(file_name, content, first_line_content=None, type="0"):
    """Write a list of strings to `file_name`.

    type "0" numbers each line (used for reference lists); type "1" strips all
    whitespace from each line first (used for text/translation lines).
    """
    with open(file_name, "w", encoding="utf-8") as f:
        if first_line_content:
            f.write(first_line_content)
        if type == "0":
            for i, item in enumerate(content):
                f.write(str(i + 1) + "、" + item + "\n")
        elif type == "1":
            for item in content:
                f.write(clear_Data(item) + '\n')

def clear_Data(text):
    """Remove all whitespace characters from `text`."""
    return re.sub(r'\s', '', text)
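
# A chapter page embeds its translation in one of two ways, distinguished by the
# onclick handler inside the page's <h1> block:
#   * ShowYizhuYuanchuang(...): sentence-aligned pairs fetched from
#     ajaxbfanyiYuanchuang.aspx (each <p>original<br/> followed by a <span>translation</span>);
#   * ShowYizhu(...): paragraph-level translation fetched from ajaxbfanyi.aspx.
# chapter() below handles both layouts and skips pages that have neither.
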
def chapter(html, header, dir_name):
    """Crawl one chapter page and write src.txt, tgt.txt and 数据来源.txt into `dir_name`."""
    request = requests.get(url=html, headers=header)
    time.sleep(0.5)

    bs = BeautifulSoup(request.text, 'lxml')

    # Pages without an <h1> title block carry no usable text.
    if len(bs.select("body > div.main3 > div.left > div.sons > div > h1")) == 0:
        return
    type2 = str(bs.select("body > div.main3 > div.left > div.sons > div > h1")[0])

    if len(re.findall(r':ShowYizhuYuanchuang\(', type2)) > 0:
        # Sentence-aligned translation: fetch the AJAX fragment for this chapter id.
        id = re.findall(r'bookv_(.*)\.aspx', html)[0]
        baseurl = "https://so.gushiwen.cn/guwen/ajaxbfanyiYuanchuang.aspx?id=" + id + "&state=duanyi"

        request2 = requests.get(url=baseurl, headers=header)
        time.sleep(0.5)

        bs2 = BeautifulSoup(request2.text, 'lxml')
        content = str(bs2.select("body > div.contson")[0])
        # Pair every original sentence (<p>…<br/>) with its translation (<span>…</span>).
        ori_trans = dict(zip(re.findall(r'<p>(.*?)<br/>', content),
                             re.findall(r'<span style.*?>(.*?)</span>', content)))
        reference_content = str(bs2.select("body > div.cankao")[0])
        reference_content = re.findall(r'<span style="line.*?>(.*?)<', reference_content)
        reference_content = reference_content if len(reference_content) > 0 \
            else re.findall(r'<span>(.*?)<', str(bs2.select("body > div.cankao")[0]))

        write_files(os.path.join(dir_name, "src.txt"), os.path.join(dir_name, "tgt.txt"), ori_trans)
        write_file(os.path.join(dir_name, "数据来源.txt"), reference_content, first_line_content="参考资料:\n")

    elif len(re.findall(r':ShowYizhu\(', type2)) > 0:
        # Paragraph-level translation: the original text sits on the chapter page
        # itself and the translation is fetched from a separate AJAX endpoint.
        ori = str(bs.select('body > div.main3 > div.left > div > div.cont > div')[0])
        ori = re.findall(r'<p>(.*?)</p>', ori)

        id = re.findall(r':ShowYizhu\((.*?),', type2)[0]
        baseurl = "https://so.gushiwen.cn/guwen/ajaxbfanyi.aspx?id=" + id
        request2 = requests.get(url=baseurl, headers=header)
        time.sleep(0.5)
        bs2 = BeautifulSoup(request2.text, 'lxml')
        trans = str(bs2.select('body > div > div.shisoncont > div')[0])
        trans = re.findall(r'<p>(.*?)</p>', trans)

        reference_content = str(bs2.select("body > div.sons > div.cankao")[0])
        reference_content = re.findall(r'<span style="line.*?>(.*?)<', reference_content)
        reference_content = reference_content if len(reference_content) > 0 \
            else re.findall(r'<span>(.*?)<', str(bs2.select("body > div.sons > div.cankao")[0]))

        # src.txt holds the original paragraphs, tgt.txt their translations.
        write_file(os.path.join(dir_name, "src.txt"), ori, type="1")
        write_file(os.path.join(dir_name, "tgt.txt"), trans, type="1")
        write_file(os.path.join(dir_name, "数据来源.txt"), reference_content, first_line_content="参考资料:\n")
    else:
        return
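
# Output layout produced by the functions below:
#   <base_dir>/<book>/[<section>/]<chapter>/{src.txt, tgt.txt, 数据来源.txt}
# The section level only exists for books that are split into named sections.
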
def book(baseurl, header, bookName, lastInfo, f_log):
    """Crawl all sections and chapters of one book, resuming from `lastInfo` if given."""
    lastSection = lastInfo[1] if isinstance(lastInfo, tuple) else None
    lastChap = lastInfo[2] if isinstance(lastInfo, tuple) else None

    request = requests.get(url=baseurl, headers=header)
    time.sleep(0.5)
    bs = BeautifulSoup(request.text, 'lxml')

    chap_num = len(bs.select("body > div.main3 > div.left > div.sons > div"))
    flag = True

    for i in range(chap_num):
        chap_detail_list = str(bs.select("body > div.main3 > div.left > div.sons > div")[i])
        t_bookName = bookName
        if chap_num > 1:
            # Books with more than one block are split into named sections.
            sectionName = re.findall(r'<strong>(.*)</strong>', chap_detail_list)[0]
            # Skip sections until the one recorded in the log is reached again.
            if lastSection is not None and lastSection != "" and lastSection != sectionName and flag:
                continue
            flag = False
            t_bookName = os.path.join(bookName, sectionName)
            if not os.path.exists(t_bookName):
                os.mkdir(t_bookName)
            f_log.write('###' + sectionName + '###\n')
            print(' 当前篇章:' + sectionName)
        chapID_chapName = dict(zip(re.findall(r'(https:.*aspx)', chap_detail_list),
                                   re.findall(r'aspx">(.*)</a>', chap_detail_list)))
        flag2 = True
        for html, name in chapID_chapName.items():
            # '/' cannot appear in a directory name, so replace it.
            if '/' in name:
                name = name.replace('/', '&')
            # Skip chapters until the one recorded in the log is reached again.
            if lastChap is not None and lastChap != "" and lastChap != name and flag2:
                continue
            flag2 = False
            if not os.path.exists(os.path.join(t_bookName, name)):
                os.mkdir(os.path.join(t_bookName, name))
            f_log.write('##' + name + '##\n')
            print(' 当前章节:' + name)
            chapter(html, header, os.path.join(t_bookName, name))
        lastChap = None
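
# The four category pages crawled in main() are the listing pages of the 经 (Classics),
# 史 (Histories), 子 (Masters) and 集 (Collections) divisions on guwen/Default.aspx;
# books() walks one such listing and hands each book over to book().
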
def books(baseurl, header, lastInfo, f_log, base_dir_name='crawl-data'):
    """Crawl every book listed on one category page.

    Returns True if the book recorded in `lastInfo` was not found on this page
    (everything was skipped), so the caller keeps `lastInfo` for the next page.
    """
    lastBook = lastInfo[0] if isinstance(lastInfo, tuple) else None

    request = requests.get(url=baseurl, headers=header)
    time.sleep(0.5)
    bs = BeautifulSoup(request.text, 'lxml')

    book_list = str(bs.select("body > div.main3 > div.left > div.sons > div")[0])
    book_bookName = dict(zip(re.findall(r'href="(.*)" target=', book_list),
                             re.findall(r'_blank">(.*)</a>', book_list)))
    flag = True
    for bookurl, bookName in book_bookName.items():
        # Skip books until the one recorded in the log is reached again.
        if lastBook is not None and bookName != lastBook and flag:
            continue
        flag = False
        url = "https://so.gushiwen.cn" + bookurl
        dir = os.path.join(base_dir_name, bookName)
        if not os.path.exists(dir):
            os.mkdir(dir)
        f_log.write('####' + bookName + '####\n')
        print('当前书籍:' + bookName)
        book(url, header, dir, lastInfo, f_log)
        lastInfo, lastBook = None, None

    return flag
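
# The crawl log encodes nesting depth with the number of '#' characters around
# each name, one entry per line:
#   ####<book name>####
#   ###<section name>###
#   ##<chapter name>##
# readLog() returns the last entry seen at each level so the crawl can resume there.
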
def readLog():
    """Parse log/crawl_log.txt and return (lastBook, lastSection, lastChap) to resume from."""
    with open('log/crawl_log.txt', 'r', encoding='utf-8') as f_log:
        log = f_log.read()

    lastBOOK, lastSection, lastChap = "", "", ""

    if len(re.findall('####(.*)####', log)) > 0:
        lastBOOK = re.findall('####(.*)####', log)[-1]
    else:
        # Nothing has been crawled yet.
        return None

    # Only look at the part of the log that follows the last book entry.
    BookContent = log[log.find(lastBOOK):]
    if len(re.findall('###(.*)###', BookContent)) > 0:
        lastSection = re.findall('###(.*)###', BookContent)[-1]
        BookContent = BookContent[BookContent.find(lastSection):]

    if len(re.findall('##(.*)##', BookContent)) > 0:
        lastChap = re.findall('##(.*)##', BookContent)[-1]

    return lastBOOK, lastSection, lastChap

def main():
    header = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36"
    }
    # Listing pages for the four divisions: 经部, 史部, 子部, 集部.
    urllist = [
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e7%bb%8f%e9%83%a8',
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e5%8f%b2%e9%83%a8',
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e5%ad%90%e9%83%a8',
        'https://so.gushiwen.cn/guwen/Default.aspx?p=1&type=%e9%9b%86%e9%83%a8'
    ]
    base_dir_name = '双语数据'
    lastInfo = None

    if not os.path.exists("log"):
        os.mkdir("log")

    # Resume from the last logged position if a previous run was interrupted.
    if os.path.exists('log/crawl_log.txt'):
        lastInfo = readLog()

    if not os.path.exists(base_dir_name):
        os.mkdir(base_dir_name)

    # Line-buffered log so progress survives a crash.
    f_log = open('log/crawl_log.txt', 'a', buffering=1, encoding='utf-8')
    for url in urllist:
        # Once the logged book has been found and re-crawled, stop skipping.
        if not books(url, header, lastInfo, f_log, base_dir_name):
            lastInfo = None
    f_log.close()


if __name__ == '__main__':
    main()