![](https://img.laitimes.com/img/_0nNw4CM6IyYiwiM6ICdiwiI2EzX4xSZz91ZsAzNfRHLGZkRGZkRfJ3bs92YsETMfVmepNHLzkEVNFTRE1UMRpHW1x2RlBnVyQWQClGVF5UMR9Fd4VGdsATNfd3bkFGazxycykFaKdkYzZUbapXNXlleSdVY2pESa9VZwlHdssmch1mclRXY39CXldWYtlWPzNXZj9mcw1ycz9WL49zZuBnLxUzN2MTNzMDM2ADMxEjMwIzLc52YucWbp5GZzNmLn9Gbi1yZtl2Lc9CX6MHc0RHaiojIsJye.png)
使用 BeautifulSoup 爬取小說網站小說的內容:
# -*- coding: UTF-8 -*-
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
if __name__ == '__main__':
    # Scrape every chapter of "三国演义" from shicimingju.com and append
    # each one as "title:content" to ./test.txt.
    headers = {
        'referer': 'https://www.qiushibaike.com/imgrank/',
        # The original UA string was truncated ("Windows NT 10.0; ..." with
        # an unmatched ")"); restored the standard "Mozilla/5.0 (" prefix so
        # it is a valid browser-like User-Agent.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36'
    }
    url = 'https://www.shicimingju.com/book/sanguoyanyi.html'
    # .content returns raw bytes; lxml/BeautifulSoup sniffs the page
    # encoding itself, which avoids requests' ISO-8859-1 default guess.
    page_text = requests.get(url=url, headers=headers).content
    # Instantiate a parser object and load the page source into it.
    soup = BeautifulSoup(page_text, 'lxml')
    # Parse the chapter titles and the detail-page URLs from the TOC list.
    li_list = soup.select('.book-mulu>ul>li')
    # 'with' guarantees the output file is closed even if a request raises
    # (the original opened fp and never closed it).
    with open('./test.txt', 'w', encoding='utf-8') as fp:
        for li in li_list:
            title = li.a.string
            # urljoin resolves root-relative hrefs correctly; the original
            # "...com/" + href produced a double slash in the URL.
            detail_url = urljoin('https://www.shicimingju.com/', li.a['href'])
            # Fetch and parse one chapter's detail page.
            detail_page_text = requests.get(url=detail_url, headers=headers).content
            detail_soup = BeautifulSoup(detail_page_text, 'lxml')
            div_tag = detail_soup.find('div', class_='chapter_content')
            if div_tag is None:
                # Layout changed or the request was blocked: skip this
                # chapter instead of crashing with AttributeError.
                print(title, 'skipped: no chapter_content div')
                continue
            content = div_tag.text
            fp.write(title + ':' + content + '\n')
            print(title, 'ok')
輸出結果: