# Notes: GET requests with the `requests` library
#
# Date: 2023-03-09 12:58:54
 import requests

 response= requests.get('http://www.baidu.com')#get方法请求网址
print(response)
print(response.status_code)#状态码
print(response.text)#响应体
print(response.cookies)#获取cookies
# Also available: response.url and response.history (redirect history)
 #requests的各种请求方式
import requests
requests.get('http://httpbin.org/get')
requests.post('http://httpbin.org/post')
requests.delete('http://httpbin.org/delete')
requests.head('http://httpbin.org/head')
requests.options('http://httpbin.org/options')
 #简单的get请求
#通过response.text获得响应体
import requests
response = requests.get('http://httpbin.org/get')
print(response.text) #带参数的请求
#利用params将字典形式数据传入进去,相当于urllib.parse.urlencode
data = {
'name':'germy',
'age':22
}
response = requests.get('http://httpbin.org/get',params=data)
print(response.text)
 #解析json
#response.json()相当于json.loads()方法
import requests
import json
response = requests.get('http://httpbin.org/get')
print(response.json())
print('*'*100)
print(json.loads(response.text))
 #获取并保存二进制数据,response.content即二进制数据
import requests
response= requests.get('http://inews.gtimg.com/newsapp_ls/0/1531939223/0')
#print(response.content)
with open('D://tomas.jpg','wb') as f:
f.write(response.content)
 #添加headers
import requests
response = requests.get('https://www.zhihu.com/explore')
#print(response.text)#结果返回服务器端错误,证实爬虫被知乎禁止了
#结果:<html><body><h1>500 Server Error</h1> #解决的方法是添加headers,方法非常简单,加进去就可以了
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/49.0.2623.75 Safari/537.36 LBBROWSER'}
response = requests.get('https://www.zhihu.com/explore',headers=headers)
print(response.text)