Net spider (Python 网络爬虫 — web crawler demo)

时间:2023-11-22 17:19:26
# -*- coding: utf-8 -*-
import cookielib
import re
import urllib2

from bs4 import BeautifulSoup
url="http://www.baidu.com" #第一种方法
response1=urllib2.urlopen(url)
print response1.getcode()
print len(response1.read()) #第二种方法
request=urllib2.Request(url)
request.add_header("user-agent","Mozilla/5.0")
response2=urllib2.urlopen(request)
print response2.getcode()
print len(response2.read()) #第三种方法
cj=cookielib.CookieJar()
opener=urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
response3=urllib2.urlopen(url)
print response3.getcode()
print cj
print response3.read() #BeautifulSoup实例
# Sample document for the BeautifulSoup demos below.  The literal in the
# original source was garbled into asterisks; it is restored here as the
# classic "three sisters" snippet from the bs4 documentation, which is the
# only document the find() calls below (href='http://example.com/lacie',
# re 'ill', p class 'title') can match.
html_doc = """<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
</body></html>
"""
soup = BeautifulSoup(html_doc,
                     'html.parser',
                     from_encoding='utf-8')
print "获取所有的链接"
links=soup.find_all("a")
for link in links:
print link.name,link['href'],link.get_text()
print '获取单个链接'
link_node=soup.find('a',href='http://example.com/lacie')
print link_node.name, link_node['href'], link_node.get_text() print "正则表达式"
link_node=soup.find('a',href=re.compile(r"ill"))
print link_node.name,link_node['href'],link_node.get_text() print "获取p段落文字"
p_node=soup.find('p',class_="title")
print p_node.name,p_node.get_text()