A Complete Python Example of Scraping a Website That Requires Login

Date: 2021-03-03 17:48:09
from selenium import webdriver

driver = webdriver.Firefox()
driver.get('https://music.douban.com/')
# Print the title of each album in the "new albums" section.
for i in driver.find_elements_by_css_selector('.new-albums .album-title'):
    print(i.text)

This reads the results from the page after it has been fully assembled (rendered).
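Selenium can drive the login step itself, too. Below is a minimal sketch, assuming a hypothetical login page with element IDs 'username', 'password', and 'login-btn'; swap in the selectors of the actual target site. Once the form is submitted, the same driver instance carries the session cookies, so later page loads are authenticated.

from selenium import webdriver

driver = webdriver.Firefox()
driver.get('https://example.com/login')  # hypothetical login page
driver.find_element_by_id('username').send_keys('<your username>')   # hypothetical field id
driver.find_element_by_id('password').send_keys('<your password>')   # hypothetical field id
driver.find_element_by_id('login-btn').click()                       # hypothetical submit button id
# The driver now holds the login cookies; subsequent loads are authenticated.
driver.get('https://example.com/protected-page')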

import requests
from lxml import html

# Create a session object; it persists cookies across all login-related requests.
session_requests = requests.session()

# Extract the CSRF token used by the login form.
login_url = "https://bitbucket.org/account/signin/?next=/"
result = session_requests.get(login_url)
tree = html.fromstring(result.text)
authenticity_token = list(set(tree.xpath("//input[@name='csrfmiddlewaretoken']/@value")))[0]

payload = {
    "username": "<your username>",
    "password": "<your password>",
    "csrfmiddlewaretoken": authenticity_token  # the page source contains a hidden input named "csrfmiddlewaretoken"
}

# Perform the login.
result = session_requests.post(
    login_url,
    data=payload,
    headers=dict(referer=login_url)
)

# Now that we are logged in, scrape content from the Bitbucket dashboard.
url = 'https://bitbucket.org/dashboard/overview'
result = session_requests.get(
    url,
    headers=dict(referer=url)
)

# Inspect the scraped content.
tree = html.fromstring(result.content)
bucket_elems = tree.findall(".//span[@class='repo-name']")
bucket_names = [bucket.text_content().replace("\n", "").strip() for bucket in bucket_elems]
print(bucket_names)
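Logging in on every run is wasteful. One common follow-up (not part of the original example) is to pickle the session's cookie jar and reload it later; the cookies attribute of a requests session supports this directly.

import pickle

# Save the authenticated cookies after a successful login ...
with open('cookies.pkl', 'wb') as f:
    pickle.dump(session_requests.cookies, f)

# ... and restore them into a fresh session on the next run, skipping
# the CSRF/login dance entirely for as long as the cookies stay valid.
session_requests = requests.session()
with open('cookies.pkl', 'rb') as f:
    session_requests.cookies.update(pickle.load(f))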
from bs4 import BeautifulSoup
import requests

class CSDN(object):
    def __init__(self, headers):
        self.session = requests.Session()
        self.headers = headers

    def get_webflow(self):
        # Fetch the login page and pull out the hidden form fields
        # ("lt" and "execution") that CSDN's login flow requires.
        url = 'http://passport.csdn.net/account/login'
        response = self.session.get(url=url, headers=self.headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        lt = soup.find('input', {'name': 'lt'})['value']
        execution = soup.find('input', {'name': 'execution'})['value']
        soup.clear()
        return (lt, execution)

    def login(self, account, password):
        self.username = account
        self.password = password
        lt, execution = self.get_webflow()
        data = {
            'username': account,
            'password': password,
            'lt': lt,
            'execution': execution,
            '_eventId': 'submit'
        }
        url = 'http://passport.csdn.net/account/login'
        response = self.session.post(url=url, headers=self.headers, data=data)
        if response.status_code == 200:
            print('OK')
        else:
            print('error')

    def func(self):
        headers1 = {
            'Host': 'write.blog.csdn.net',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
        }
        response = self.session.get(url='http://write.blog.csdn.net/postlist', headers=headers1, allow_redirects=False)
        print(response.text)

if __name__ == '__main__':
    headers = {
        'Host': 'passport.csdn.net',
        'Origin': 'http://passport.csdn.net',
        'Referer': 'http://passport.csdn.net/account/login',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36',
    }
    csdn = CSDN(headers=headers)
    account = ''
    password = ''
    csdn.login(account=account, password=password)
    csdn.func()
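Note that checking status_code == 200 is a weak success test: a rejected login usually also returns 200 with the login form re-rendered. A more telling check, reusing the same trick func() relies on, is to request a login-only page with redirects disabled: an authenticated session gets 200, while a logged-out one gets a 302 redirect to the sign-in page. A sketch (this check is an assumption, not part of the original class):

check_headers = {'User-Agent': 'Mozilla/5.0'}
# write.blog.csdn.net/postlist requires a login, so a 302 means we are not authenticated.
response = csdn.session.get('http://write.blog.csdn.net/postlist',
                            headers=check_headers, allow_redirects=False)
print('logged in' if response.status_code == 200 else 'not logged in')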
# coding=utf-8
import requests
import re
import time
import json
from bs4 import BeautifulSoup as BS
import sys

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}

def Get_Movie_URL():
    urls = []
    # The Top 100 list spans 10 pages.
    for i in range(1, 11):
        # The first page's URL is different and needs special handling.
        if i != 1:
            url = "http://www.mtime.com/top/movie/top100/index-%d.html" % i
        else:
            url = "http://www.mtime.com/top/movie/top100/"
        r = requests.get(url=url, headers=headers)
        soup = BS(r.text, 'lxml')
        movies = soup.find_all(name='a', attrs={'target': '_blank', 'href': re.compile(r'http://movie.mtime.com/(\d+)/'), 'class': not None})
        for m in movies:
            urls.append(m.get('href'))
    return urls

def Create_Ajax_URL(url):
    # The movie URL ends with a slash, so the id is the second-to-last segment.
    movie_id = url.split('/')[-2]
    t = time.strftime("%Y%m%d%H%M%S0368", time.localtime())
    ajax_url = "http://service.library.mtime.com/Movie.api?Ajax_CallBack=true&Ajax_CallBackType=Mtime.Library.Services&Ajax_CallBackMethod=GetMovieOverviewRating&Ajax_CrossDomain=1&Ajax_RequestUrl=%s&t=%s&Ajax_CallBackArgument0=%s" % (url, t, movie_id)
    return ajax_url

def Crawl(ajax_url):
    r = requests.get(url=ajax_url, headers=headers)
    if r.status_code == 200:
        r.encoding = 'utf-8'
        # The response is JSONP ("var ... = {...};"); grab the JSON payload.
        result = re.findall(r'=(.*?);', r.text)[0]
        if result is not None:
            value = json.loads(result)
            movieTitle = value.get('value').get('movieTitle')
            TopListName = value.get('value').get('topList').get('TopListName')
            Ranking = value.get('value').get('topList').get('Ranking')
            movieRating = value.get('value').get('movieRating')
            RatingFinal = movieRating.get('RatingFinal')
            RDirectorFinal = movieRating.get('RDirectorFinal')
            ROtherFinal = movieRating.get('ROtherFinal')
            RPictureFinal = movieRating.get('RPictureFinal')
            RStoryFinal = movieRating.get('RStoryFinal')
            print(movieTitle)
            if value.get('value').get('boxOffice'):
                TotalBoxOffice = value.get('value').get('boxOffice').get('TotalBoxOffice')
                TotalBoxOfficeUnit = value.get('value').get('boxOffice').get('TotalBoxOfficeUnit')
                print('Box office: %s%s' % (TotalBoxOffice, TotalBoxOfficeUnit))
            print('%s -- No.%s' % (TopListName, Ranking))
            print('Overall: %s  Director: %s  Picture: %s  Story: %s  Music: %s' % (RatingFinal, RDirectorFinal, RPictureFinal, RStoryFinal, ROtherFinal))
            print('****' * 20)

def main():
    urls = Get_Movie_URL()
    for u in urls:
        Crawl(Create_Ajax_URL(u))
    # Known issue: requesting a single movie link like the one below
    # intermittently fails to return any data.
    # Crawl(Create_Ajax_URL('http://movie.mtime.com/98604/'))

if __name__ == '__main__':
    main()
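For the intermittent failures noted in the comment above, a simple mitigation is to retry with a short delay. Crawl() raises an IndexError when re.findall() matches nothing in the Ajax response, so retrying on exceptions covers that case. This wrapper is a sketch; the retry count and delay are arbitrary choices, not values from the original post.

def Crawl_With_Retry(ajax_url, retries=3, delay=2):
    # Try up to `retries` times, sleeping `delay` seconds between attempts.
    for attempt in range(retries):
        try:
            Crawl(ajax_url)
            return
        except Exception:
            time.sleep(delay)
    print('giving up on %s' % ajax_url)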

Related tools

Link: https://pan.baidu.com/s/1oEw_MsaAWcMx7NQII6jXYg  Password: e6b6
Link: https://pan.baidu.com/s/1fSppM-hK2x9Jk9RGqvRMqg  Password: 4q43