Scraping Proxy IPs with Python

Date: 2022-12-27 16:58:48

The code in this article scrapes proxy IPs from the proxy site http://www.youdaili.net/. When running the crawler, you supply the links of the pages that contain the proxy IPs.

Testing whether an IP is usable

import telnetlib

def filter_ip(ip_info):
    '''
    Check whether a given proxy IP is usable.
    :param ip_info: proxy address in 'ip:port' form
    :return: True if a TCP connection can be opened, False otherwise
    '''
    ip, port = ip_info.split(':')
    try:
        # Try to open a TCP connection to the proxy, with a 5-second timeout.
        telnetlib.Telnet(ip, port=int(port), timeout=5)
    except OSError:
        # Connection refused, timed out, or host unreachable.
        return False
    else:
        return True
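Note that telnetlib has been removed from the standard library as of Python 3.13 (PEP 594). A minimal sketch of an equivalent check using the socket module, assuming "usable" just means the port accepts a TCP connection:

import socket

def filter_ip_socket(ip_info):
    '''Same connectivity check as filter_ip, but via socket instead of telnetlib.'''
    ip, port = ip_info.split(':')
    try:
        # create_connection opens a TCP connection or raises OSError on failure/timeout.
        with socket.create_connection((ip, int(port)), timeout=5):
            return True
    except OSError:
        return False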

Fetching the proxy IPs from a given page

import requests
from bs4 import BeautifulSoup

def get_ip(url):
    '''
    Scrape all proxy IPs from the given page.
    :param url: a page that lists proxy IPs
    :return: list of usable 'ip:port' strings
    '''
    headers = {
        'Host': 'www.youdaili.net',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:53.0) Gecko/20100101 Firefox/53.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Referer': 'http://www.youdaili.net/',
        # Session cookie copied from a browser visit; yours will differ.
        'Cookie': 'yd_cookie=073e53b5-1129-4519ac7f2522cab720c9ef1fd5ece2b443e9; Hm_lvt_f8bdd88d72441a9ad0f8c82db3113a84=1496297251; Hm_lpvt_f8bdd88d72441a9ad0f8c82db3113a84=1496297640',
        'Connection': 'keep-alive',
    }

    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'html5lib')
    # Each <p> under .content holds an entry like 'ip:port@protocol#location'.
    ip_text = soup.select('.content > p')
    ip = [e.text.split('@')[0] for e in ip_text if len(e.text) > 6]
    # Keep only the proxies that pass the connectivity test.
    ip_good = [e for e in ip if filter_ip(e)]
    return ip_good
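Once a working proxy has been collected, it can be passed to requests through its proxies argument. A minimal sketch; the proxy address and test URL below are illustrative, not output from the crawler:

import requests

proxy = '1.2.3.4:8080'  # hypothetical entry taken from ip_good
proxies = {
    'http': 'http://' + proxy,
    'https': 'http://' + proxy,
}
# httpbin.org/ip echoes the IP the request arrived from,
# so the response should show the proxy's address, not yours.
resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=10)
print(resp.text)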

Running the crawler

import json

if __name__ == '__main__':
    # Pages that list the proxy IPs: the first page plus pages 2-5.
    urls = ['http://www.youdaili.net/Daili/guonei/36783_{0}.html'.format(i) for i in range(2, 6)]
    urls.append('http://www.youdaili.net/Daili/guonei/36783.html')

    ip_good = list()
    for url in urls:
        tmp = get_ip(url)
        ip_good.extend(tmp)
        print(url)

    print(ip_good)
    # Persist the usable proxies as JSON; the with block closes the file.
    with open('./ip.txt', 'w') as f:
        f.write(json.dumps(ip_good))
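To reuse the saved list in a later run, load it back with json. A short usage sketch, reading the ip.txt file written above:

import json

with open('./ip.txt') as f:
    ip_good = json.load(f)
print(len(ip_good), 'proxies loaded')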