Python Study Notes 6 (Getting Free Proxy IPs)

Date: 2023-03-10 04:26:19

  Wanting some free proxy IPs from the web, I put together a script in my spare time that scrapes free proxies from three different proxy sites. The point of using three sources is that when one site blocks you, the script can still fetch proxies from whichever of the others is reachable. Tested and working! ^_^  For reference only; the script below can be extended with a function that actually accesses other things through the fetched proxy IPs (a sketch of that is given at the end).

import requests
import urllib.request
from bs4 import BeautifulSoup
import random, time, re

# Regular expression matching a dotted-quad IPv4 address
IPRegular = r"(([1-9]?\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}([1-9]?\d|1\d{2}|2[0-4]\d|25[0-5])"

header1 = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Ubuntu Chromium/44.0.2403.89 '
                         'Chrome/44.0.2403.89 Safari/537.36'}

# Lists holding the fetched proxies
IPs = []
proxy_list = []

# Proxy source 1: xicidaili.com
def getIPList1():
    for i in range(1, 3):
        req = urllib.request.Request(url='http://www.xicidaili.com/nt/{0}'.format(i), headers=header1)
        r = urllib.request.urlopen(req)
        soup = BeautifulSoup(r, 'html.parser', from_encoding='utf-8')
        table = soup.find('table', attrs={'id': 'ip_list'})
        tr = table.find_all('tr')[1:]
        # Parse each row for the proxy's IP address and port
        for item in tr:
            tds = item.find_all('td')
            proxy = "{0}:{1}".format(tds[1].get_text().lower(), tds[2].get_text())
            proxy_list.append(proxy)
    return proxy_list
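
# Side note: the IPRegular pattern defined at the top is never actually used
# below. A small helper (my addition, just a sketch) could apply it to filter
# out malformed entries before the proxies are used:
def isValidProxy(proxy):
    # Keep only entries whose host part is a well-formed dotted-quad IP
    return re.match(IPRegular + r"$", proxy.split(':')[0]) is not None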
# Proxy source 2: 66ip.cn
def getIPList2():
    # Looking at this site's URLs, each province's proxy pages are numbered,
    # so a {} placeholder stands in for the page number
    url = 'http://www.66ip.cn/areaindex_16/{}.html'
    for page in range(1, 10):
        # Fill in the placeholder to build one page's URL; filling into a
        # separate variable keeps the template intact for the next pass
        page_url = url.format(page)
        # get() returns a Response object; its text attribute is the page source
        rsp = requests.get(page_url)
        text = rsp.text
        # Parse the source with BeautifulSoup's lxml parser
        soup = BeautifulSoup(text, 'lxml')
        # find() locates the table holding the IPs
        table = soup.find(name='table', attrs={'border': '2px'})
        # find_all() collects every row; the first row is a header, not an IP,
        # so skip it
        ip_list = table.find_all(name='tr')[1:]
        for addr in ip_list:
            # First cell is the IP, second cell is the port
            ip = addr.find_all(name='td')[0].string
            port = addr.find_all(name='td')[1].string
            proxy = '%s:%s' % (ip, port)
            proxy_list.append(proxy)
    return proxy_list
# Proxy source 3: iphai.com
def getIPList3():
    request_list = []
    headers = {
        'Host': 'www.iphai.com',
        'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
        'Accept': r'application/json, text/javascript, */*; q=0.01',
        'Referer': r'http://www.iphai.com',
    }
    request_item = "http://www.iphai.com/free/ng"
    request_list.append(request_item)
    for req_id in request_list:
        req = urllib.request.Request(req_id, headers=headers)
        response = urllib.request.urlopen(req)
        html = response.read().decode('utf-8')
        # Pull the IPs and ports out of the page with regular expressions
        ip_list = re.findall(r'\d+\.\d+\.\d+\.\d+', html)
        port_list = re.findall(r'(\s\d+\s)', html)
        for i in range(len(ip_list)):
            ip = ip_list[i]
            # Strip the surrounding whitespace the port regex captures
            port = port_list[i].strip()
            proxy = '%s:%s' % (ip, port)
            proxy_list.append(proxy)
    return proxy_list
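
# A possible extension (my addition, not part of the original script): pull
# from all three sources in one call and drop duplicate entries, skipping any
# source that happens to be unreachable or has changed its page layout.
def getAllIPs():
    for getter in (getIPList1, getIPList2, getIPList3):
        try:
            getter()
        except Exception:
            # This site is blocked or its markup changed; try the next one
            pass
    return list(set(proxy_list))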
if __name__ == "__main__":
    # Probe the three proxy sites and scrape whichever one is reachable
    list1 = ['http://www.66ip.cn/', 'https://www.xicidaili.com', 'http://www.iphai.com/free/ng']
    while True:
        for url in list1:
            try:
                r = requests.get(url, timeout=5, headers=header1)
                if r.status_code == 200:
                    print("OK, site reachable:", url)
                    if url == list1[0]:
                        IPs = getIPList2()
                    elif url == list1[1]:
                        IPs = getIPList1()
                    elif url == list1[2]:
                        IPs = getIPList3()
                    break
            except requests.RequestException:
                # This site is blocked or down -- move on to the next source
                print("Error, cannot reach:", url)
                continue
        print("Fetched proxy IPs:", IPs)
        time.sleep(10)
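
As mentioned at the top, the script is meant to be extended with a function that accesses other resources through the fetched proxies. Here is a minimal sketch of what that could look like, using the proxies parameter of requests; the target URL http://httpbin.org/ip is only an example, and since free proxies die quickly, the call is wrapped in a timeout and exception handler.

def visitWithProxy(url):
    # Pick a random "ip:port" entry from the fetched list
    proxy = random.choice(proxy_list)
    proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
    try:
        r = requests.get(url, proxies=proxies, headers=header1, timeout=5)
        return r.text
    except requests.RequestException:
        print("Proxy failed:", proxy)
        return None

# Example: ask httpbin which IP it sees, which should be the proxy's address
# print(visitWithProxy('http://httpbin.org/ip'))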