I watched a web-crawler tutorial on imooc (慕課網) and, following along, wrote a simple example that crawls Baidu Baike.
(1) Install Beautifulsoup4
Beautiful Soup is an HTML-parsing library for Python that is very pleasant to use; http://cuiqingcai.com/1319.html is a good introduction to it. The library has to be installed separately: go into the Scripts directory under your Python installation and run pip install beautifulsoup4.
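As a quick taste of the API before diving in (the HTML fragment below is made up purely for illustration):

# coding: utf-8
from bs4 import BeautifulSoup

# a made-up fragment, just to demonstrate the calls used later in this post
html = '<html><body><a href="/item/Python">Python</a></body></html>'
soup = BeautifulSoup(html, "html.parser")
link = soup.find('a')     # first <a> tag in the document
print link['href']        # /item/Python
print link.get_text()     # Python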
(2) Crawler implementation
The crawler is split into five modules: a scheduler, a page downloader, a page parser, a URL manager, and an outputer.
The URL manager stores the URLs discovered so far; on each round one not-yet-crawled URL is taken out for fetching.
# coding: utf-8
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs that have already been crawled

    # add a single new URL
    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    # add a batch of URLs
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    # is there any URL left to crawl?
    def has_new_url(self):
        return len(self.new_urls) != 0

    # take one URL out and mark it as crawled
    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
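A quick sanity check of the deduplication behavior (the URL is the same root URL the scheduler uses below):

manager = UrlManager()
manager.add_new_url("https://baike.baidu.com/item/Python/407313")
manager.add_new_url("https://baike.baidu.com/item/Python/407313")  # duplicate, silently dropped
print manager.has_new_url()   # True: exactly one URL is waiting
print manager.get_new_url()   # returns the URL and moves it into old_urls
print manager.has_new_url()   # False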
The page download module fetches the content of a page; urllib2 does the actual downloading.
# coding: utf-8
import urllib2

class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        # anything other than HTTP 200 counts as a failed fetch
        if response.getcode() != 200:
            return None
        return response.read()
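A minimal smoke test, assuming the entry page is reachable from your network:

downloader = HtmlDownloader()
html = downloader.download("https://baike.baidu.com/item/Python/407313")
if html is not None:
    print len(html)   # size in bytes of the fetched page, varies per run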
The page parser module uses BeautifulSoup to extract the other entry links and the lemma summary from the current page.
# coding: utf-8
import re
import urlparse
from bs4 import BeautifulSoup

class HtmlParser(object):
    def parser(self, page_url, html_content):
        if page_url is None or html_content is None:
            return None, None
        soup = BeautifulSoup(html_content, "html.parser", from_encoding="utf-8")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links on Baidu Baike look like /item/...
        links = soup.find_all('a', href=re.compile(r'/item/'))
        for link in links:
            new_url = link["href"]
            new_url_full = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_url_full)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find("div", class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
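The href values on Baidu Baike pages are relative paths like /item/..., which is why urlparse.urljoin is needed to turn them into absolute URLs (the /item/Guido link here is just an example):

import urlparse
# a root-relative path replaces everything after the host
print urlparse.urljoin("https://baike.baidu.com/item/Python/407313", "/item/Guido")
# https://baike.baidu.com/item/Guido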
The output module collects the parsed page data and finally writes it out as an HTML table.
# coding: utf-8
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open("outputer.html", 'w')
        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")
        # close the tags in the reverse order they were opened
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
Last comes the scheduler module, which wires all the other modules together:
# coding: utf-8
import url_manager
import html_downloader
import html_outputer
import html_parser

class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print new_url
                # download the page content
                html_content = self.downloader.download(new_url)
                # parse out the new links and the page data
                new_urls, new_data = self.parser.parser(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
            except Exception:
                print "spider failed"
            # stop after ten pages
            if count == 10:
                break
            count = count + 1
        self.outputer.output_html()

if __name__ == "__main__":
    root_url = "https://baike.baidu.com/item/Python/407313"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
The project directory simply holds one .py file per module. The full source is on GitHub; anyone interested can download it from:
https://github.com/HelloKittyNII/Spider/tree/master/%E7%99%BE%E5%BA%A6%E7%99%BE%E7%A7%91%E7%88%AC%E8%99%AB/baike_spider