Python: scraping all the novels on one Qidian listing page

Date: 2022-04-25 07:32:24
 

This post is purely for technique practice; please do not use it for anything illegal.
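The flow is simple: fetch a Qidian listing page, collect every book link on it, jump to each book's first chapter, then keep following the "next chapter" button until it runs out, appending each chapter to a per-book .txt file. First, the imports and a small fetch helper: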

```python
import re
import time
import urllib.request
from bs4 import BeautifulSoup

url = input("URL of the first listing page: ")

def gethtml(url):
    """Fetch a page and return it as a parsed BeautifulSoup tree."""
    page = urllib.request.urlopen(url)
    html = page.read().decode('utf-8')  # raw page source as a str
    return BeautifulSoup(html, 'html.parser')
```
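Qidian, like most big sites, may reject bare urllib requests. If the fetch above comes back empty or errors out, attaching a browser-like User-Agent usually helps. A minimal sketch of such a variant; the helper name and header string are my own placeholders, not part of the original script:

```python
import urllib.request
from bs4 import BeautifulSoup

def gethtml_ua(url):
    """Like gethtml, but sends a browser-like User-Agent (hypothetical helper)."""
    # The header value is an assumption; any mainstream browser UA string works.
    req = urllib.request.Request(
        url,
        headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"})
    with urllib.request.urlopen(req) as page:
        html = page.read().decode('utf-8')
    return BeautifulSoup(html, 'html.parser')
```

Back to the scraper: next, collect every book on the listing page, resolve each one's first chapter, and write chapters to disk.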
```python
def getbookurl(soup):
    """Collect the first-chapter URL of every book on the listing page."""
    firsturl2 = []
    try:
        bookurl = soup.find_all("h4")
        bookurl1 = re.findall(
            r'<h4><a data-bid=".*?" data-eid=".*?" href="(.*?)" target="_blank"',
            str(bookurl))
        for i in range(len(bookurl1)):
            soup1 = gethtml("http:" + bookurl1[i])  # each book's detail page
            time.sleep(0.2)
            firsturl = soup1.find_all("a", {"class": "red-btn J-getJumpUrl "})
            firsturl1 = re.findall(
                r'data-firstchapterjumpurl=".*?" href="(.*?)" id="readBtn">',
                str(firsturl))
            # Qidian restricts some books; those show up as an empty href, so skip them.
            if firsturl1[0] == '':
                continue
            firsturl2.append(firsturl1[0])
            print(firsturl2)
        return firsturl2
    except Exception:
        return firsturl2

def getcontent(soup, load):
    """Append one chapter (title plus body) to the book's text file."""
    content = soup.find_all("div", {"class": "read-content j_readContent"})
    content2 = re.findall(r'<p>([\s\S]*?)</p>', str(content))
    content3 = re.sub(r"</?\w+[^>]*>", '', content2[0])  # strip leftover tags
    content4 = content3.replace('。', '。\n\n')           # paragraph break per sentence
    contentname1 = re.findall(r'<h3 class="j_chapterName">(.*?)</h3>', str(soup))
    book = "-" * 40 + contentname1[0] + "-" * 40 + "\n\n\n" + content4
    # gb18030 is a superset of GBK, so it can encode characters GBK cannot.
    with open(load, 'a', encoding='gb18030') as f:
        f.write(book)
```
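A quick aside on that encoding choice: gb18030 is a superset of GBK that covers all of Unicode, so characters a plain GBK writer would choke on still encode cleanly:

```python
text = "第一章 😀"         # the emoji is not representable in GBK
text.encode("gb18030")     # fine: gb18030 maps every Unicode character
# text.encode("gbk")       # would raise UnicodeEncodeError
```

Next, the two helpers that move the crawler from chapter to chapter: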
```python
def nextcontent(soup):
    """Return the absolute URL of the next chapter."""
    content = soup.find_all("div", {"class": "chapter-control dib-wrap"})
    content1 = re.findall(
        r'<a data-eid="qd_R109" href="(.*?)" id="j_chapterNext">', str(content))
    if content1 == []:
        # the next-chapter button sometimes carries a different data-eid
        content2 = re.findall(
            r'<a data-eid="qd_R118" href="(.*?)" id="j_chapterNext">', str(content))
        return "http:" + content2[0]
    return "http:" + content1[0]

def panduan(soup):
    """Return the qd_R109 matches; an empty list marks the last chapter."""
    content = soup.find_all("div", {"class": "chapter-control dib-wrap"})
    return re.findall(
        r'<a data-eid="qd_R109" href="(.*?)" id="j_chapterNext">', str(content))
```
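panduan ("check") reuses nextcontent's first regex; the driver below treats its empty result as the end-of-book signal. With the helpers defined, the main loop ties everything together: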
```python
# -------------------------------------------------------------------------
# Driver: for every book on the listing page, follow the chapter chain
# until the next-chapter button disappears.
# -------------------------------------------------------------------------
soup2 = gethtml(url)
firsturl2 = getbookurl(soup2)
for j in range(len(firsturl2)):
    url = "http:" + firsturl2[j]
    soup1 = gethtml(url)
    bookname = re.findall(r'<h1>(.*?)</h1>', str(soup1))
    load = "d:/88/%s.txt" % bookname[0]  # one .txt per book; the folder must exist
    i = 0
    while True:
        soup = gethtml(url)
        getcontent(soup, load)
        i += 1
        print("Chapter %d downloaded" % i)
        if panduan(soup) == []:          # last chapter reached
            break
        url = nextcontent(soup)
        time.sleep(0.2)                  # throttle requests a little
    print("------------- Book %d downloaded ---------" % (j + 1))
```
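One fragility worth flagging: the script runs regexes over str(...) of BeautifulSoup results, which breaks the moment Qidian reorders or renames an attribute. The same links can be pulled through BeautifulSoup's own attribute access instead; a sketch (get_book_links is my own name, and it assumes the listing page keeps its h4 > a markup):

```python
def get_book_links(soup):
    """Collect book hrefs via tag attributes instead of regexing raw HTML."""
    links = []
    for h4 in soup.find_all("h4"):
        a = h4.find("a", href=True)  # first anchor inside each <h4>
        if a:
            links.append(a["href"])
    return links
```

It returns the same protocol-relative hrefs as the regex version when the markup matches, but it survives attribute reordering.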

Result screenshots:

[screenshots omitted]

Still learning!!! Keep going!