Using an IP Proxy Pool with the Scrapy Crawler Framework

Date: 2022-09-25 17:00:31

I. Manually updating the IP pool

Method 1:

1. Add an IP pool to the settings.py configuration file:

IPPOOL = [
    {"ipaddr": "61.129.70.131:8080"},
    {"ipaddr": "61.152.81.193:9100"},
    {"ipaddr": "120.204.85.29:3128"},
    {"ipaddr": "219.228.126.86:8123"},
    {"ipaddr": "61.152.81.193:9100"},
    {"ipaddr": "218.82.33.225:53853"},
    {"ipaddr": "223.167.190.17:42789"}
]

These IPs can be gathered from sites such as 快代理, 代理66, 有代理, 西刺代理, and guobanjia. If you see a message like "the connection attempt failed because the connected party did not properly respond after a period of time, or the connected host failed to respond", or "no connection could be made because the target machine actively refused it", the proxy IP itself is the problem; just swap it for another one. In practice, many of the IPs listed above turn out to be dead, as in the log below:

2017-04-16 12:38:11 [scrapy.downloadermiddlewares.retry] DEBUG: Retrying <GET http://news.sina.com.cn/> (failed 1 times): TCP connection timed out: 10060: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.
this is ip:182.241.58.70:51660
2017-04-16 12:38:32 [scrapy.downloadermiddlewares.retry] DEBUG: Retrying <GET http://news.sina.com.cn/> (failed 2 times): TCP connection timed out: 10060: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.
this is ip:49.75.59.243:28549
2017-04-16 12:38:33 [scrapy.crawler] INFO: Received SIGINT, shutting down gracefully. Send again to force
2017-04-16 12:38:33 [scrapy.core.engine] INFO: Closing spider (shutdown)
2017-04-16 12:38:50 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2017-04-16 12:38:53 [scrapy.downloadermiddlewares.retry] DEBUG: Gave up retrying <GET http://news.sina.com.cn/> (failed 3 times): TCP connection timed out: 10060: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.
2017-04-16 12:38:54 [scrapy.core.scraper] ERROR: Error downloading <GET http://news.sina.com.cn/>
Traceback (most recent call last):
  File "f:\software\python36\lib\site-packages\twisted\internet\defer.py", line 1299, in _inlineCallbacks
    result = result.throwExceptionIntoGenerator(g)
  File "f:\software\python36\lib\site-packages\twisted\python\failure.py", line 393, in throwExceptionIntoGenerator
    return g.throw(self.type, self.value, self.tb)
  File "f:\software\python36\lib\site-packages\scrapy\core\downloader\middleware.py", line 43, in process_request
    defer.returnValue((yield download_func(request=request, spider=spider)))
twisted.internet.error.TCPTimedOutError: TCP connection timed out: 10060: A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.
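
Before dropping addresses into IPPOOL it is worth probing each one once, since dead entries only burn retries. The helper below is a sketch that is not part of the original post; the probe URL, timeout, and sample addresses are arbitrary choices:

import requests

IPPOOL = [
    {"ipaddr": "61.129.70.131:8080"},
    {"ipaddr": "120.204.85.29:3128"},
]

def proxy_alive(ipaddr, timeout=3):
    # True only if a request routed through the proxy comes back with HTTP 200.
    try:
        return requests.get('http://httpbin.org/ip',
                            proxies={'http': 'http://' + ipaddr},
                            timeout=timeout).status_code == 200
    except requests.RequestException:
        return False

if __name__ == '__main__':
    # Keep only the entries that still respond
    live = [p for p in IPPOOL if proxy_alive(p['ipaddr'])]
    print(live)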

The downloader middleware in Scrapy that handles proxy settings is HttpProxyMiddleware; the corresponding class is:

scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware
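
In practice, request.meta['proxy'] is the key the download machinery honours; HttpProxyMiddleware mainly fills it in from the standard http_proxy/https_proxy environment variables when nothing else has set it, and handles proxy authentication. That also means a proxy can be attached per request straight from a spider, without any custom middleware. A minimal sketch, not from the original post (spider name and proxy address are placeholders):

import scrapy

class DemoSpider(scrapy.Spider):
    name = 'demo'

    def start_requests(self):
        # Attach the proxy directly in meta; fine when a single fixed proxy is enough.
        yield scrapy.Request('http://news.sina.com.cn/',
                             meta={'proxy': 'http://61.129.70.131:8080'})

    def parse(self, response):
        self.logger.info('fetched %s through the proxy', response.url)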

2. Modify the middleware file middlewares.py:

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html

import random
from scrapy import signals
from youx.settings import IPPOOL


class MyproxiesSpiderMiddleware(object):

    def __init__(self, ip=''):
        self.ip = ip

    def process_request(self, request, spider):
        # Pick a random proxy from the pool and attach it to the outgoing request
        thisip = random.choice(IPPOOL)
        print("this is ip:" + thisip["ipaddr"])
        request.meta["proxy"] = "http://" + thisip["ipaddr"]

3. Set DOWNLOADER_MIDDLEWARES in settings.py:

DOWNLOADER_MIDDLEWARES = {
    # 'youx.middlewares.MyCustomDownloaderMiddleware': 543,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
    'youx.middlewares.MyproxiesSpiderMiddleware': 125,
}
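
To confirm the middleware is actually being used, a throwaway spider that fetches http://httpbin.org/ip works well: httpbin echoes the caller's IP, which should now be the proxy's address rather than your own. The spider below is illustrative and not part of the original post:

import scrapy

class IpCheckSpider(scrapy.Spider):
    name = 'ipcheck'
    start_urls = ['http://httpbin.org/ip']

    def parse(self, response):
        # The "origin" field in the response should show the proxy's address.
        self.logger.info('exit ip: %s', response.text)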

Method 2:

middlewares.py:

import base64
import random
from scrapy import signals

PROXIES = [
    {'ip_port': '61.160.233.8', 'user_pass': ''},
    {'ip_port': '125.93.149.186', 'user_pass': ''},
    {'ip_port': '58.38.86.181', 'user_pass': ''},
    {'ip_port': '119.142.86.110', 'user_pass': ''},
    {'ip_port': '124.161.16.89', 'user_pass': ''},
    {'ip_port': '61.160.233.8', 'user_pass': ''},
    {'ip_port': '101.94.131.237', 'user_pass': ''},
    {'ip_port': '219.157.162.97', 'user_pass': ''},
    {'ip_port': '61.152.89.18', 'user_pass': ''},
    {'ip_port': '139.224.132.192', 'user_pass': ''}
]


class ProxyMiddleware(object):
    def process_request(self, request, spider):
        # Pick a random proxy and attach it to the request
        proxy = random.choice(PROXIES)
        request.meta['proxy'] = "http://%s" % proxy['ip_port']
        # Only add Proxy-Authorization when credentials are actually provided
        if proxy['user_pass']:
            encoded_user_pass = base64.b64encode(proxy['user_pass'].encode()).decode()
            request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
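
All of the entries above carry empty credentials, so the Proxy-Authorization header is never actually added. A paid proxy with HTTP Basic authentication would be listed with its credentials in 'user:password' form; the values below are placeholders:

{'ip_port': '10.10.1.10:3128', 'user_pass': 'myuser:mypassword'}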

settings.py:

DOWNLOADER_MIDDLEWARES = {
    # 'youx.middlewares.MyCustomDownloaderMiddleware': 543,
    'youx.middlewares.ProxyMiddleware': 700,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
}

Original link: http://www.cnblogs.com/rwxwsblog/p/4575894.html

II. Automatically updating the IP pool

Here is a class, proxies.py, that fetches proxies automatically; run it once to save the harvested IPs to a txt file:

# -*- coding: utf-8 -*-
import random
import requests
from bs4 import BeautifulSoup
from multiprocessing import Process, Queue


class Proxies(object):
    """docstring for Proxies"""

    def __init__(self, page=3):
        self.proxies = []
        self.verify_pro = []
        self.page = page
        self.headers = {
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8'
        }
        self.get_proxies()
        self.get_proxies_nn()

    def get_proxies(self):
        # Scrape a few pages of the "nt" proxy list
        page = random.randint(1, 10)
        page_stop = page + self.page
        while page < page_stop:
            url = 'http://www.xicidaili.com/nt/%d' % page
            html = requests.get(url, headers=self.headers).content
            soup = BeautifulSoup(html, 'lxml')
            ip_list = soup.find(id='ip_list')
            for odd in ip_list.find_all(class_='odd'):
                protocol = odd.find_all('td')[5].get_text().lower() + '://'
                self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
            page += 1

    def get_proxies_nn(self):
        # Scrape a few pages of the "nn" (high-anonymity) proxy list
        page = random.randint(1, 10)
        page_stop = page + self.page
        while page < page_stop:
            url = 'http://www.xicidaili.com/nn/%d' % page
            html = requests.get(url, headers=self.headers).content
            soup = BeautifulSoup(html, 'lxml')
            ip_list = soup.find(id='ip_list')
            for odd in ip_list.find_all(class_='odd'):
                protocol = odd.find_all('td')[5].get_text().lower() + '://'
                self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
            page += 1

    def verify_proxies(self):
        # Queue of proxies not yet verified
        old_queue = Queue()
        # Queue of proxies that passed verification
        new_queue = Queue()
        print('verify proxy........')
        works = []
        for _ in range(15):
            works.append(Process(target=self.verify_one_proxy, args=(old_queue, new_queue)))
        for work in works:
            work.start()
        for proxy in self.proxies:
            old_queue.put(proxy)
        for work in works:
            # One sentinel per worker tells it to stop
            old_queue.put(0)
        for work in works:
            work.join()
        self.proxies = []
        while 1:
            try:
                self.proxies.append(new_queue.get(timeout=1))
            except Exception:
                break
        print('verify_proxies done!')

    def verify_one_proxy(self, old_queue, new_queue):
        while 1:
            proxy = old_queue.get()
            if proxy == 0:
                break
            protocol = 'https' if 'https' in proxy else 'http'
            proxies = {protocol: proxy}
            try:
                if requests.get('http://www.baidu.com', proxies=proxies, timeout=2).status_code == 200:
                    print('success %s' % proxy)
                    new_queue.put(proxy)
            except Exception:
                print('fail %s' % proxy)


if __name__ == '__main__':
    a = Proxies()
    a.verify_proxies()
    print(a.proxies)
    proxie = a.proxies
    with open('proxies.txt', 'a') as f:
        for proxy in proxie:
            f.write(proxy + '\n')

The verified proxies are appended to proxies.txt, one per line.
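
Each line of proxies.txt is a full proxy URL in the scheme://ip:port form assembled by the scraper, for example (addresses illustrative):

http://61.129.70.131:8080
https://120.204.85.29:3128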

Then change the contents of the proxy middleware file middlewares.py to the following, so it reads from that file:

import random
import time


class ProxyMiddleWare(object):
    """Downloader middleware that attaches a random proxy read from proxies.txt."""

    def process_request(self, request, spider):
        """Attach a proxy to the outgoing request."""
        proxy = self.get_random_proxy()
        print("this is request ip:" + proxy)
        request.meta['proxy'] = proxy

    def process_response(self, request, response, spider):
        """Handle the returned response."""
        # If the response status is not 200, re-issue the current request with a new proxy
        if response.status != 200:
            proxy = self.get_random_proxy()
            print("this is response ip:" + proxy)
            # Attach a fresh proxy and return the request so Scrapy retries it
            request.meta['proxy'] = proxy
            return request
        return response

    def get_random_proxy(self):
        """Read a random proxy from the file."""
        while 1:
            with open('G:\\Scrapy_work\\myproxies\\myproxies\\proxies.txt', 'r') as f:
                proxies = f.readlines()
            if proxies:
                break
            else:
                time.sleep(1)
        proxy = random.choice(proxies).strip()
        return proxy
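
Retrying on a non-200 status catches bans, but free proxies mostly fail at the connection level (timeouts, refused connections), which never produces a response at all. One option, sketched here and not part of the original post, is to add a process_exception hook to the same middleware so a dead proxy is swapped out as soon as the download itself fails:

    def process_exception(self, request, exception, spider):
        # Called when the download raised (e.g. TCP timeout through a dead proxy).
        # Attach a fresh proxy and return the request so Scrapy reschedules it.
        proxy = self.get_random_proxy()
        print("download failed (%s), retrying via %s" % (exception, proxy))
        request.meta['proxy'] = proxy
        return request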

Finally, adjust the settings file:

DOWNLOADER_MIDDLEWARES = {
    # 'youx.middlewares.MyCustomDownloaderMiddleware': 543,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': None,
    'youx.middlewares.ProxyMiddleWare': 125,
    'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': None
}

Original link: http://blog.csdn.net/u011781521/article/details/70194744