天天看点

大学排名信息爬取

import requests
from bs4 import BeautifulSoup
import bs4

def getHTMLText(url):
    """Fetch *url* and return the decoded page text, or "" on any failure.

    Sets encoding from apparent_encoding so Chinese pages decode
    correctly; the timeout keeps the crawler from hanging on a dead host.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        r.encoding = r.apparent_encoding  # guess the real charset from the body
        return r.text
    except requests.RequestException:  # network/HTTP errors only, not e.g. KeyboardInterrupt
        return ""

def fillUnivList(ulist, html):
    """Parse the ranking page *html* and append [rank, name, score] rows to *ulist*.

    The page keeps one university per <tr> inside <tbody>; columns are
    rank / school name / province / total score, so we take td 0, 1 and 3.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for tr in soup.find('tbody').children:
        # children also yields NavigableString whitespace nodes;
        # keep only real tags (needs the bs4 module for the type check)
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')  # shortcut for tr.find_all('td')
            # td .string holds the cell text; index 3 is the total score
            # (index 2 is the province column, skipped here)
            ulist.append([tds[0].string, tds[1].string, tds[3].string])

def printUnivList(ulist, num):
    """Pretty-print the first *num* rows of *ulist* as an aligned table.

    Format spec: positions {0}/{1}/{2} are rank/school/score, ^10 centers
    each field in a 10-char column, and {3} injects chr(12288) — the
    full-width CJK space — as the fill character so Chinese names align.
    """
    tplt = "{0:^10}\t{1:{3}^10}\t{2:^10}"
    print(tplt.format('排名', '学校', '分数', chr(12288)))
    for i in range(num):
        u = ulist[i]  # u == [rank, school, score]
        print(tplt.format(u[0], u[1], u[2], chr(12288)))

def main():
    """Crawl the 2016 ranking page and print the top universities."""
    url = 'http://www.zuihaodaxue.com/zuihaodaxuepaiming2016.html'
    ulist = []  # accumulates [rank, school, score] rows; must exist before fillUnivList
    html = getHTMLText(url)
    fillUnivList(ulist, html)
    printUnivList(ulist, 20)  # show the top 20 entries

main()
           

进阶方式:保存爬取数据

import requests
import re
from bs4 import BeautifulSoup
import bs4
import pandas as pd

# Fetch the 2017 ranking page; on failure fall back to an empty page so
# the parse stage below produces an empty CSV instead of a NameError on r.
try:
    r = requests.get("http://www.zuihaodaxue.com/zuihaodaxuepaiming2017.html",
                     timeout=30)
    r.raise_for_status()  # surface HTTP 4xx/5xx as an error
    r.encoding = r.apparent_encoding  # decode Chinese text correctly
    html = r.text
except requests.RequestException:
    print("")
    html = ""

# Parallel column lists, one entry per university row.
school = []
province = []
score = []
soup = BeautifulSoup(html, "html.parser")
tbody = soup.find('tbody', 'hidden_zhpm')  # the ranking table body
if tbody is not None:  # missing on an empty/failed page
    for tr in tbody.children:
        # children yields whitespace NavigableStrings too; keep only tags
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')  # columns: 0 rank, 1 school, 2 province, 3 score
            school.append(tds[1].string)
            province.append(tds[2].string)
            score.append(tds[3].string)

# Assemble the scraped columns into a table: transpose so each list
# becomes a column, then name the columns.
u_info = pd.DataFrame([school, province, score]).T
u_info = u_info.rename(columns={0: 'univ', 1: 'province', 2: 'score'})
u_info.to_csv('univ_info.csv', index=False)  # save to univ_info.csv
           

继续阅读