第一个Python小爬虫

时间:2023-03-09 13:11:16
第一个Python小爬虫

这个爬虫是参考http://python.jobbole.com/81353/这篇文章写的

这篇文章可能年代过于久远,所以有些代码会报错,然后我自己稍微修改了一下,增加了一个getContentAll的方法

 # -*- coding:utf-8 -*-

 __author__ = 'HK'

 import urllib
import urllib2
import re class Tool:
#去除img标签,7位长空格
removeImg = re.compile('<img.*?>| {7}|')
#删除超链接标签
removeAddr = re.compile('<a.*?>|</a>')
#把换行的标签换为\n
replaceLine = re.compile('<tr>|<div>|</div>|</p>')
#将表格制表<td>替换为\t
replaceTD= re.compile('<td>')
#把段落开头换为\n加空两格
replacePara = re.compile('<p.*?>')
#将换行符或双换行符替换为\n
replaceBR = re.compile('<br><br>|<br>')
#将其余标签剔除
removeExtraTag = re.compile('<.*?>')
def replace(self,x):
x = re.sub(self.removeImg,"",x)
x = re.sub(self.removeAddr,"",x)
x = re.sub(self.replaceLine,"\n",x)
x = re.sub(self.replaceTD,"\t",x)
x = re.sub(self.replacePara,"\n ",x)
x = re.sub(self.replaceBR,"\n",x)
x = re.sub(self.removeExtraTag,"",x)
#strip()将前后多余内容删除
return x.strip() class BDTB: #initalizing,get the base url,and set parments is only see up
def __init__(self, baseURL, seeup):
self.baseURL = baseURL
self.seeup = str(seeup)
#self.user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
#self.referer = 'http://tieba.baidu.com/p/4899608185?see_lz=1&pn=1'
#self.Host = 'tieba.baidu.com' #iput the index, get the pageindex current post
def getPage(self, pageNum):
try:
#header = {'User-Agent': self.user_agent, 'Host': self.Host, 'Referer': self.referer}
url = self.baseURL + '?' + 'pn=' + str(pageNum)
print url
request = urllib2.Request(url)
response = urllib2.urlopen(request)
content = response.read()
return content
except urllib2.URLError, e:
if hasattr(e, "reason"):
print u'链接百度贴吧失败,错误原因', e.reason
return None def getTitel(self):
page = self.getPage(1)
pattern = re.compile('<h3 class="core_title_txt.*?>(.*?)</h3>', re.S)
result = re.search(pattern, page)
if result:
print result.group(1).strip()
else:
print 'there is no content catch the re'
return None def getContent(self, pageIndex):
pattern = re.compile('<div id="post_content_.*?>(.*?)</div>', re.S)
items = re.findall(pattern, self.getPage(pageIndex))
tool = Tool()
txt = ""
for item in items:
txt += '\t' + tool.replace(str(item)) + '\r\n'
return txt def getPageNum(self):
pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>',re.S)
result = re.search(pattern, bdtb.getPage(1))
if result:
return result.group(1).strip()
else:
return None def getContentAll(self):
pageMax = self.getPageNum()
txtlog = open('txtContent.txt', 'wb+')
txtlog.seek(0)
for index in range(1, int(pageMax)+1):
txtlog.write(str(index) + self.getContent(index))
txtlog.flush()
txtlog.close()
print 'Over' baseURL = 'http://tieba.baidu.com/p/4899608185'
bdtb = BDTB(baseURL, 1)
bdtb.getTitel()
print bdtb.getPageNum()
bdtb.getContentAll()

直接运行就能看到结果