[Python Crawler] Part 22: Scraping the Jiemian website with Selenium + PhantomJS and pyquery

Posted: 2021-09-06 08:51:11

 

  I. Introduction

    This example uses Selenium + PhantomJS to crawl news listings from Jiemian (https://a.jiemian.com/index.php?m=search&a=index&type=news&msg=电视), fetching the articles that match a given set of keywords; a minimal fetch-and-parse sketch follows the field list below.

    Given keywords: 数字 (digital); 融合 (convergence); 电视 (TV)

    The following fields are captured for each article:

      1. Article title

      2. Article URL

      3. Article date

      4. Article source
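    At a high level the flow is: let PhantomJS load the search page so that the JavaScript-rendered result list exists in the DOM, pull the rendered HTML back out of the driver, and parse it with pyquery. A minimal fetch-and-parse sketch (Python 2, using the same calls as the complete code in section IV; the msg parameter below is just the URL-encoded form of the keyword 电视):

# coding=utf-8
from selenium import webdriver
from pyquery import PyQuery as pq

driver = webdriver.PhantomJS()
# %E7%94%B5%E8%A7%86 is the percent-encoded keyword '电视'
driver.get('https://a.jiemian.com/index.php?m=search&a=index&type=news&msg=%E7%94%B5%E8%A7%86')
# Pull the JavaScript-rendered DOM out of the browser and hand it to pyquery
selenium_html = driver.execute_script("return document.documentElement.outerHTML")
doc = pq(selenium_html)
print 'news blocks found: %d' % len(doc('div[class^="news-view"]'))
driver.quit()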

 

  II. Site Information

    (Screenshots of the Jiemian search-results page and the HTML structure of its news list appeared here in the original post.)

 

  III. Data Scraping

    Scraping proceeds against the page structure shown above; a combined parsing sketch follows the five steps below.

    1. First, select the list of news items

      Code: Elements = doc('div[class^="news-view"]')

      (The ^= attribute selector matches every div whose class attribute starts with "news-view".)

    2. Extract the title

      Code: title = element('div[class="news-header"]').find('h3').find('a').text().encode('utf8').replace(' ', '')

    3. Extract the link

      Code: url = element('div[class="news-header"]').find('h3').find('a').attr('href')

    4. Extract the date

      Code: strdate = element('div[class="news-footer"]').find('p').find('span').eq(1).text().encode('utf8')

    5. Extract the source

      Code: source = element('div[class="news-footer"]').find('p').find('span').eq(0).find('a').text().encode('utf8').replace(' ', '')
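
    Putting these selectors together, the parsing step is a short loop over the matched news blocks. A minimal sketch (Python 2, same calls as the complete code in section IV), assuming doc already holds the PhantomJS-rendered page:

for element in doc('div[class^="news-view"]').items():
    # Each element is one news block; the selectors mirror steps 2-5 above
    title = element('div[class="news-header"]').find('h3').find('a').text().encode('utf8').replace(' ', '')
    url = element('div[class="news-header"]').find('h3').find('a').attr('href')
    strdate = element('div[class="news-footer"]').find('p').find('span').eq(1).text().encode('utf8')
    source = element('div[class="news-footer"]').find('p').find('span').eq(0).find('a').text().encode('utf8').replace(' ', '')
    print title, url, strdate, source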

   

  IV. Complete Code

# coding=utf-8
import os
import re
from selenium import webdriver
import selenium.webdriver.support.ui as ui
import time
from datetime import datetime
import IniFile
# from threading import Thread
from pyquery import PyQuery as pq
import LogFile
import mongoDB
import urllib
class jiemianSpider(object):
    def __init__(self):

        logfile = os.path.join(os.path.dirname(os.getcwd()), time.strftime('%Y-%m-%d') + '.txt')
        self.log = LogFile.LogFile(logfile)
        configfile = os.path.join(os.path.dirname(os.getcwd()), 'setting.conf')
        cf = IniFile.ConfigFile(configfile)
        webSearchUrl_list = cf.GetValue("jiemian", "webSearchUrl")
        self.keyword_list = cf.GetValue("section", "information_keywords").split(';')
        self.db = mongoDB.mongoDbBase()
        self.start_urls = []
        for word in self.keyword_list:
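            # urllib.quote percent-encodes the keyword so it can be appended to the search URL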
            self.start_urls.append(webSearchUrl_list + urllib.quote(word))

        self.driver = webdriver.PhantomJS()
        self.wait = ui.WebDriverWait(self.driver, 2)
        self.driver.maximize_window()

    def scroll_foot(self):
        '''
        Scroll the page to the bottom.
        :return:
        '''
        js = ""
        # Chrome or PhantomJS driver
        if self.driver.name == "chrome" or self.driver.name == 'phantomjs':
            js = "var q=document.body.scrollTop=10000"
        # Internet Explorer driver
        elif self.driver.name == 'internet explorer':
            js = "var q=document.documentElement.scrollTop=10000"
        return self.driver.execute_script(js)

    def Comapre_to_days(self,leftdate, rightdate):
        '''
        Compare two date strings; returns how many days leftdate is ahead of rightdate.
        :param leftdate: format: 2017-04-15
        :param rightdate: format: 2017-04-15
        :return: number of days
        '''
        l_time = time.mktime(time.strptime(leftdate, '%Y-%m-%d'))
        r_time = time.mktime(time.strptime(rightdate, '%Y-%m-%d'))
        result = int(l_time - r_time) / 86400
        return result

    def date_isValid(self, strDateText):
        '''
        Check whether a date/time string is valid: the embedded yyyy-mm-dd date must equal the current date.
        :param strDateText: source/date text, e.g. '慧聪网 7小时前'; '新浪游戏 29分钟前'; '中国行业研究网 2017-6-13'
        :return: (True, date) if valid; (False, '') otherwise
        '''
        currentDate = time.strftime('%Y-%m-%d')
        datePattern = re.compile(r'\d{4}-\d{1,2}-\d{1,2}')
        dt = strDateText.replace('/', '-')
        strDate = re.findall(datePattern, dt)
        if len(strDate) == 1:
            if self.Comapre_to_days(currentDate, strDate[0]) == 0:
                return True, currentDate
        return False, ''


    def log_print(self, msg):
        '''
        Logging helper: print a timestamped message to stdout.
        :param msg: message text
        :return:
        '''
        print '%s: %s' % (time.strftime('%Y-%m-%d %H-%M-%S'), msg)

    def scrapy_date(self):
        strsplit = '------------------------------------------------------------------------------------'
        for link in self.start_urls:
            self.driver.get(link)

            # Grab the JavaScript-rendered DOM from PhantomJS; pyquery then parses it
            selenium_html = self.driver.execute_script("return document.documentElement.outerHTML")
            doc = pq(selenium_html)

            infoList = []
            self.log.WriteLog(strsplit)
            self.log_print(strsplit)

            Elements = doc('div[class^="news-view"]')

            for element in Elements.items():
                strdate = element('div[class="news-footer"]').find('p').find('span').eq(1).text().encode('utf8')
                flag, date = self.date_isValid(strdate)
                if flag:
                    title = element('div[class="news-header"]').find('h3').find('a').text().encode('utf8').replace(' ', '')
                    for keyword in self.keyword_list:
                        if title.find(keyword) > -1:
                            url = element('div[class="news-header"]').find('h3').find('a').attr('href')
                            source = element('div[class="news-footer"]').find('p').find('span').eq(0).find(
                                'a').text().encode('utf8').replace(' ', '')

                            dictM = {'title': title, 'date': date,
                             'url': url, 'keyword': keyword, 'introduction': title, 'source': source}
                            infoList.append(dictM)
                            # self.log.WriteLog('title:%s' % title)
                            # self.log.WriteLog('url:%s' % url)
                            # self.log.WriteLog('source:%s' % source)
                            # self.log.WriteLog('kword:%s' % keyword)
                            # self.log.WriteLog(strsplit)

                            self.log_print('title:%s' % dictM['title'])
                            self.log_print('url:%s' % dictM['url'])
                            self.log_print('date:%s' % dictM['date'])
                            self.log_print('source:%s' % dictM['source'])
                            self.log_print('kword:%s' % dictM['keyword'])
                            self.log_print(strsplit)
                            break
            if len(infoList)>0:
                self.db.SaveInformations(infoList)

        self.driver.close()
        self.driver.quit()

obj = jiemianSpider()
obj.scrapy_date()
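
    The helper modules IniFile, LogFile, and mongoDB are the author's own wrappers (config reading, file logging, and MongoDB persistence) and are not listed here. For reference, a setting.conf consistent with the GetValue calls above might look roughly like this; the section and option names come from the code, and the values are illustrative only:

[jiemian]
webSearchUrl = https://a.jiemian.com/index.php?m=search&a=index&type=news&msg=

[section]
information_keywords = 数字;融合;电视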