--***Tested and working as of 2019-3-27***----
Step 1:
Open cmd and run scrapy startproject taobao_s to create a new project.
Then cd into the project folder and run scrapy genspider taobao www.taobao.com to create a spider.
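For orientation, the generated project should look roughly like this (Scrapy's default template, plus tools.py; the exact location of tools.py is an assumption here):

taobao_s/
    scrapy.cfg
    taobao_s/
        items.py
        pipelines.py
        settings.py
        tools.py          # the hand-written helper module described below
        spiders/
            taobao.py     # the spider generated by genspider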
The spider file looks like this. tools is a utility module I created; it contains a data-cleaning function and a Selenium login function.
# Imports the spider relies on (not shown in the original post); the tools import
# assumes tools.py sits inside the taobao_s package -- adjust it to wherever yours lives.
import random
import time

import scrapy
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By

from taobao_s.items import TaobaoSItem
from taobao_s.tools import data_cleaning, register


class TaobaoSpider(scrapy.Spider):
    name = 'taobao'
    # allowed_domains = ['www.taobao.com']
    base_url = ['https://s.taobao.com/search?q=']
    pages = 100
    re_headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
        'referer': 'https://www.taobao.com/',
        'accept-encoding': 'gzip, deflate, br',
    }
    i = 1

    def start_requests(self):
        keys = self.settings.get('KEYS')  # the search keyword, read from settings.py
        self.browser, cookies = register()  # log in with Selenium; returns the browser and a cookie dict
        self.browser.get(self.base_url[0] + keys)  # open the Taobao search page in the logged-in browser
        self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")  # run JS to scroll to the bottom of the page
        url_i = self.browser.current_url  # remember the current URL for error recovery
        html = self.browser.page_source  # grab the rendered source
        yield scrapy.Request(url=self.base_url[0] + keys, headers=self.re_headers, cookies=cookies,
                             callback=self.parse, meta={'html': html, 'i': self.i, 'url': url_i})
    def parse(self, response):
        time.sleep(5)  # wait for the page to settle; adjust as needed
        html = response.meta.get('html')
        i = response.meta.get("i")
        url_i = response.meta.get("url")
        i += 1
        if i > self.pages:  # we only crawl 100 pages, so stop once we pass the limit
            return
        try:
            soup = BeautifulSoup(html, 'html.parser')
            goods_list = soup.select('#mainsrp-itemlist > div > div > div > div')
            for goods in goods_list:  # parse the data out of each result
                item = TaobaoSItem()
                url = goods.select('a[class="pic-link J_ClickStat J_ItemPicA"]')[0].attrs.get('href', '')
                name = goods.select("a[class='J_ClickStat']")[0].get_text().strip()
                name = data_cleaning(name)
                price = goods.select('div[class="price g_price g_price-highlight"] strong')[0].get_text()
                num = goods.select('div[class="deal-cnt"]')[0].get_text()
                shop_name = goods.select("a[class='shopname J_MouseEneterLeave J_ShopInfo']")[0].get_text().strip()
                shop_name = data_cleaning(shop_name)
                item['url'] = url
                item['name'] = name
                item['price'] = price
                item['num'] = num
                item['shop_name'] = shop_name
                yield item
            # From page 2 onward there are two links with this class, "previous page" and
            # "next page", so take the last one to go forward.
            button = self.browser.find_elements(By.XPATH, '//a[@class="J_Ajax num icon-tag"]')[-1]
            button.click()  # click through to the next page
            time.sleep(random.random() * 2)
            self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")  # scroll to the bottom again
            html = self.browser.page_source
            yield scrapy.Request(url=response.url, headers=self.re_headers, callback=self.parse,
                                 meta={'html': html, 'i': i, 'url': url_i}, dont_filter=True)
        except Exception as e:  # if Taobao catches us, log in again and resume from the saved URL
            time.sleep(10)
            print(e)
            self.browser.close()
            self.browser, cookies = register()
            self.browser.get(url=url_i)
            time.sleep(random.random() * 2)
            self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight)")
            html = self.browser.page_source
            yield scrapy.Request(url=response.url, headers=self.re_headers, callback=self.parse,
                                 meta={'html': html, 'i': i, 'url': url_i}, dont_filter=True)

    def close(spider, reason):  # called by Scrapy when the spider finishes; shuts down the browser we opened
        spider.browser.close()
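The spider fills a TaobaoSItem, whose definition isn't shown above. Judging from the five fields assigned in parse, items.py presumably looks like this:

# items.py -- reconstructed from the fields used in parse()
import scrapy

class TaobaoSItem(scrapy.Item):
    url = scrapy.Field()
    name = scrapy.Field()
    price = scrapy.Field()
    num = scrapy.Field()
    shop_name = scrapy.Field()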
Here is the tools module:
# Imports the tools module relies on (not shown in the original post).
import random
import re
import time

from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def data_cleaning(data):  # cleans the scraped text
    data = re.sub(' ', '', data)   # strip spaces
    data = re.sub("'", '', data)   # strip single quotes
    data = re.sub('\n', '', data)  # strip newlines
    return data
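A quick example of what the cleaning does:

>>> data_cleaning("天猫 'Apple' 官方旗舰店\n")
'天猫Apple官方旗舰店'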
def register():  # the login function
    while True:  # Taobao can detect Selenium, so login sometimes fails and we retry until it works
        options = webdriver.FirefoxOptions()
        options.add_argument('-headless')  # headless browser
        browser = webdriver.Firefox(options=options)
        # browser = webdriver.Firefox()
        browser.get('https://login.taobao.com/member/login.jhtml')  # open the login page
        try:
            switch_btn = WebDriverWait(browser, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, 'forget-pwd.J_Quick2Static')))  # the page sometimes defaults to QR-code login, so we click to switch to password login
            switch_btn.click()
        except Exception as e:  # if the page already shows password login, no click is needed
            print(e)
        user = browser.find_element(By.ID, 'TPL_username_1')  # username input box
        password = browser.find_element(By.ID, 'TPL_password_1')  # password input box
        user.send_keys(USER)  # type the username, then pause briefly
        time.sleep(random.random() * 2)
        password.send_keys(PASSWORD)  # type the password, then pause briefly
        time.sleep(random.random() * 1)
        # Taobao detects Selenium mainly through navigator.webdriver, which a Selenium-driven
        # browser reports as true; overriding it to false gets past the check.
        browser.execute_script("Object.defineProperties(navigator,{webdriver:{get:() => false}})")
        action = ActionChains(browser)
        time.sleep(random.random() * 1)
        butt = browser.find_element(By.ID, 'nc_1_n1z')
        browser.switch_to.frame(browser.find_element(By.ID, '_oid_ifr_'))
        browser.switch_to.default_content()
        action.click_and_hold(butt).perform()
        action.reset_actions()
        action.move_by_offset(285, 0).perform()  # drag the slider captcha that appears after entering the credentials
        time.sleep(random.random() * 1)
        button = browser.find_element(By.ID, 'J_SubmitStatic')  # the login button
        time.sleep(random.random() * 2)
        button.click()
        time.sleep(random.random() * 2)
        # browser.get('https://www.taobao.com/')
        # Get the cookies. The original plan was to log in with Selenium and do everything else
        # in Scrapy, but I couldn't work out the search page's js and ran out of time.
        cookie = browser.get_cookies()
        cookies = {}  # cookies passed to a scrapy.Request must be a dict
        for c in cookie:
            cookies[c['name']] = c['value']
        if len(cookies) > 10:  # a successful login produces a full cookie set; otherwise close and retry
            break
        else:
            browser.close()
    return browser, cookies
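register() uses USER and PASSWORD, which never appear above; presumably they are the Taobao account credentials defined near the top of tools.py (or imported from a config). A placeholder sketch:

USER = 'your_taobao_username'       # hypothetical; replace with your own account
PASSWORD = 'your_taobao_password'   # hypothetical; replace with your own password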
Then saving the data:
class TaobaoSPipeline(object):
    def open_spider(self, spider):  # runs when the spider starts: open (or create) a txt file in the current directory
        self.f = open('淘宝店铺数据.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):  # save each item as a dict; swap this for a database or csv if you like
        data = {}
        data['url'] = item['url']
        data['name'] = item['name']
        data['price'] = item['price']
        data['num'] = item['num']
        data['shop_name'] = item['shop_name']
        self.f.write(str(data) + '\n')
        return item

    def close_spider(self, spider):  # runs when the spider finishes: close the file
        self.f.close()
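Two settings the code relies on but the post doesn't show: the custom KEYS keyword read in start_requests, and the ITEM_PIPELINES entry that turns this pipeline on. A minimal settings.py excerpt, assuming the default project layout (the keyword is only an example):

# settings.py (excerpt)
KEYS = '手机'  # example search keyword; start_requests reads it via self.settings.get('KEYS')
ITEM_PIPELINES = {
    'taobao_s.pipelines.TaobaoSPipeline': 300,  # enable the txt-file pipeline above
}

With these in place, run the crawl with scrapy crawl taobao.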
Source code: https://github.com/18370652038/taobao.git