Crawler in Practice 4: Scraping Taobao Food Listings with Selenium

Date: 2023-03-09 18:18:42

Method 1: Crawl all Taobao food listings in one run

1. spider.py is as follows

__author__ = 'Administrator'
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import re
from pyquery import PyQuery as pq
from config import *
import pymongo

client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]

browser = webdriver.Chrome()
"""
To use PhantomJS instead of Chrome:
1. Install PhantomJS first
2. Pass a few custom options, here to skip image loading and enable the disk cache:
   browser = webdriver.PhantomJS(service_args=SERVICE_ARGS)
3. Set the window size:
   browser.set_window_size(1400, 900)
"""
wait = WebDriverWait(browser, 10)  # explicit wait: poll up to 10 seconds for the target element


def search():
    # print('searching...')  # handy when debugging with PhantomJS
    try:
        browser.get('https://www.taobao.com')
        input1 = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#q'))  # locate the search box
        )
        submit = wait.until(EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '#J_TSearchForm > div.search-button > button')))  # locate the search button
        # the keyword comes from KEYWORD in config.py (here '美食')
        input1.send_keys(KEYWORD)
        submit.click()
        total = wait.until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.total')))  # total page count; selector obtained via right-click "Copy selector" in DevTools
        # parse the first page of results
        get_products()
        return total.text
    except TimeoutException:
        return search()


# paginate via the page-number input box
def next_page(page_number):
    # print('turning to page', page_number)  # handy when debugging with PhantomJS
    try:
        input1 = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > input'))
        )
        submit = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > div.form > span.btn.J_Submit'))
        )
        input1.clear()
        input1.send_keys(page_number)
        submit.click()
        # the selected page number gets highlighted, so use that to confirm the jump succeeded
        wait.until(EC.text_to_be_present_in_element(
            (By.CSS_SELECTOR, '#mainsrp-pager > div > div > div > ul > li.item.active > span'), str(page_number)))
        # parse this page's results
        get_products()
    except TimeoutException:
        next_page(page_number)


# parse the product list
def get_products():
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist .items .item')))
    html = browser.page_source
    doc = pq(html)
    items = doc('#mainsrp-itemlist .items .item').items()
    for item in items:
        product = {
            'image': item.find('.pic .img').attr('src'),
            'price': item.find('.price').text(),
            'deal': item.find('.deal-cnt').text()[:-3],  # drop the trailing 3-character suffix (e.g. '人付款')
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text()
        }
        print(product)
        # save the record to MongoDB
        save_to_mongo(product)


# a helper that saves one record to MongoDB
def save_to_mongo(result):
    try:
        # insert_one is the current pymongo API (the older insert() is deprecated)
        if db[MON_TABLE].insert_one(result):
            print('Saved to MongoDB:', result)
    except Exception:
        print('Failed to save to MongoDB:', result)


def main():
    try:
        # search() returns the total-page text; extract the number (e.g. 100)
        total = search()
        total = int(re.compile(r'(\d+)').search(total).group(1))
        # page through the remaining result pages
        for i in range(2, total + 1):
            next_page(i)
    except Exception:
        print('Something went wrong')
    finally:
        browser.close()


if __name__ == '__main__':
    main()

2. config.py

__author__ = 'Administrator'

MONGO_URL = 'localhost'
MONGO_DB = 'taobao'
MON_TABLE = 'product'

# PhantomJS options: skip image loading and use the disk cache
SERVICE_ARGS = ['--load-images=false', '--disk-cache=true']
KEYWORD = '美食'
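After the spider has run, the results can be checked directly in MongoDB. A minimal sketch, assuming the configuration above (database taobao, collection product) and a MongoDB server running locally; it is not part of the original spider:

 import pymongo

client = pymongo.MongoClient('localhost')
collection = client['taobao']['product']

print(collection.count_documents({}))  # how many products were stored
print(collection.find_one())           # peek at one stored record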

Method 2: The approach above runs correctly in testing, but it crawls everything in a single pass, which produces a large amount of data and gives no fine-grained control over what gets scraped. The code below improves on it as follows:

1. Put the search keyword directly in the URL (a short encoding sketch follows this list)

2. Scrape product information page by page

3. Use Chrome's headless mode
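
Putting the keyword straight into the URL requires percent-encoding, since it may contain non-ASCII characters. A small sketch of what urllib.parse.quote does to the Method 1 keyword '美食' (the Method 2 config below uses 'ipad', which passes through unchanged):

 from urllib.parse import quote

print(quote('美食'))  # %E7%BE%8E%E9%A3%9F
print('https://s.taobao.com/search?q=' + quote('美食'))
# https://s.taobao.com/search?q=%E7%BE%8E%E9%A3%9F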

import pymongo
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pq
from config import *
from urllib.parse import quote

# browser = webdriver.Chrome()
# browser = webdriver.PhantomJS(service_args=SERVICE_ARGS)
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(chrome_options=chrome_options)

wait = WebDriverWait(browser, 10)
client = pymongo.MongoClient(MONGO_URL)
db = client[MONGO_DB]


def index_page(page):
"""
抓取索引页
:param page: 页码
"""
print('正在爬取第', page, '页')
try:
url = 'https://s.taobao.com/search?q=' + quote(KEYWORD)
browser.get(url)
if page > 1:
#定位页码输入框
input = wait.until(
EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-pager div.form > input')))
##定位页码跳转确定按钮
submit = wait.until(
EC.element_to_be_clickable((By.CSS_SELECTOR, '#mainsrp-pager div.form > span.btn.J_Submit')))
input.clear()
input.send_keys(page)
submit.click() """
验证是否跳转到对应的页码
只需要判断当前高亮的页码数是当前的页码数即可,可使用等待条件text_to_be_present_in_element,它会等待指定的文本出现在某一节点里面时即返回成功
我们将高亮的页面节点对应的css选择器和当前要跳转的页面作为这个等待条件的参数,那么这个等待条件就会检测此页码节点是否为指定的页码数
"""
wait.until(
EC.text_to_be_present_in_element((By.CSS_SELECTOR, '#mainsrp-pager li.item.active > span'), str(page))) #等待商品信息加载,选择器'.m-itemlist .items .item'对应的页面内容就是每个商品的信息,如果加载成功,执行get_products()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.m-itemlist .items .item')))
get_products()
except TimeoutException:
index_page(page) #解析商品列表
def get_products():
    """
    Extract product data
    """
    html = browser.page_source
    doc = pq(html)
    items = doc('#mainsrp-itemlist .items .item').items()
    for item in items:
        product = {
            'image': item.find('.pic .img').attr('data-src'),
            'price': item.find('.price').text(),
            'deal': item.find('.deal-cnt').text(),  # number of deals
            'title': item.find('.title').text(),
            'shop': item.find('.shop').text(),
            'location': item.find('.location').text()
        }
        print(product)
        save_to_mongo(product)


def save_to_mongo(result):
"""
保存至MongoDB
:param result: 结果
"""
try:
if db[MONGO_COLLECTION].insert(result):
print('存储到MongoDB成功')
except Exception:
print('存储到MongoDB失败') def main():
"""
遍历每一页
"""
for i in range(1, MAX_PAGE + 1):
index_page(i)
browser.close() if __name__ == '__main__':
main()

The corresponding configuration file is as follows

MONGO_URL = 'localhost'
MONGO_DB = 'taobao'
MONGO_COLLECTION = 'products'

KEYWORD = 'ipad'

MAX_PAGE = 100

SERVICE_ARGS = ['--load-images=false', '--disk-cache=true']
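
In headless mode there is no visible browser window, so when a selector times out it can be hard to tell whether Taobao served the expected page or, say, a login/captcha page. A small debugging sketch, separate from the spider above; save_screenshot and page_source are standard Selenium WebDriver members, and the URL and file names are only examples:

 from selenium import webdriver

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--window-size=1400,900')  # give the headless browser a real viewport
browser = webdriver.Chrome(chrome_options=chrome_options)

browser.get('https://s.taobao.com/search?q=ipad')
browser.save_screenshot('search_page.png')  # what the headless browser actually rendered
with open('search_page.html', 'w', encoding='utf-8') as f:
    f.write(browser.page_source)            # raw HTML, useful for checking the CSS selectors
browser.close()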