scrapy爬取美女图片

时间:2023-03-09 19:24:31
scrapy爬取美女图片

使用scrapy爬取整个网站的图片数据。并且使用 CrawlerProcess 启动。

# -*- coding: utf-8 -*-
import scrapy
import requests
from bs4 import BeautifulSoup

from meinr.items import MeinrItem

 class Meinr1Spider(scrapy.Spider):
name = 'meinr1'
# allowed_domains = ['www.baidu.com']
# start_urls = ['http://m.tupianzj.com/meinv/xiezhen/']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
}
def num(self,url,headers): #获取网页每个分类的页数和URL格式
html = requests.get(url=url,headers=headers)
if html.status_code != 200:
return '',''
soup = BeautifulSoup(html.text,'html.parser')
nums = soup.select('#pageNum li')[3]
nums = nums.select('a')[0].attrs.get('href')
num = str(nums[:-5]).split('_')[-1]
papa = str(nums[:-5]).split('_')[:-1]
papa = '_'.join(papa)+'_'
return int(num),papa def start_requests(self):
      #这是网站的所有分类
urls = ['http://m.tupianzj.com/meinv/xiezhen/','http://m.tupianzj.com/meinv/xinggan/','http://m.tupianzj.com/meinv/guzhuang/','http://m.tupianzj.com/meinv/siwa/','http://m.tupianzj.com/meinv/chemo/','http://m.tupianzj.com/meinv/qipao/','http://m.tupianzj.com/meinv/mm/']
num = 0
for url in urls:
num,papa = self.num(url,self.headers)
for i in range(1,num):
if i != 1:
urlzz = url + papa + str(i) + '.html' #拼装每页URL
else:
urlzz = url
yield scrapy.Request(url=urlzz,headers=self.headers,callback=self.parse)
def parse(self, response):
# print(response.body)
htmllist = response.xpath('//div[@class="IndexList"]/ul[@class="IndexListult"]/li')#获取每页的图集URL和title
# print(htmllist)
for html in htmllist:
url = html.xpath('./a/@href').extract()
title = html.xpath('./a/span[1]/text()').extract()
# print(url)
# print(title)
yield scrapy.Request(url=url[0],meta={
'url':url[0],
'title':title[0]},
headers=self.headers,
callback=self.page
)
def page(self,response):
is_it = response.xpath('//div[@class="m-article"]/h1/text()').extract()
if is_it:
is_it = is_it[0].strip()
num = int(is_it[-4])
a = 0
for i in range(1,int(num)):
a += 1
url = str(response.url)[:-5] + '_' + str(i) + '.html' #拼装图集内的URL分页
yield scrapy.Request(url=url, headers=self.headers, callback=self.download, meta={
'url': response.meta.get('url'),
'title': response.meta.get('title'),
'num':a },dont_filter=True) #使用了dont_filter取消去重是因为我们需要进入第一页获取总页数 def download(self,response):
img = response.xpath("//img[@id='bigImg']/@src").extract() #获取每个页面里的img
if img:
time = MeinrItem()
time['img'] = img[0]
time['title'] = response.meta.get('title')
time['num'] = response.meta.get('num')
yield time

上面的是spider文件

import scrapy

class MeinrItem(scrapy.Item):
    """One downloadable image: its URL, gallery title, and page index."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()  # gallery title, used as the folder name
    img = scrapy.Field()    # direct URL of the image
    num = scrapy.Field()    # page index inside the gallery, used as the file name

上面的是item文件

 import os
import requests class MeinrPipeline(object):
def open_spider(self,spider):#打开spider时启动。获取下载地址
self.path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + os.sep + 'download'
# print(self.path)
def process_item(self, item, spider):
title = item['title']
img = item['img']
num = item['num']
path = self.path + os.sep + title #将图集的title设置为每个图集的文件夹的名字
if not os.path.exists(path): #没有则创建
os.makedirs(path)
html = requests.get(url=img,headers=spider.headers).content
path = path + os.sep + str(num) + '.jpg' #这是每个图集内的图片是第几页
with open(path,'wb')as f:
f.write(html)
return item

这上面是管道文件

 import datetime,os
time = datetime.datetime.now().strftime('%Y_%m_%H_%M_%S')
LOG_FILE = 'logs'+ os.sep +str(time) + '_' + "meinr.log"
LOG_LEVEL = "INFO"

这是在setting里面的,设置的日志信息和保存的位置以及消息的级别

 # -*- coding: utf-8 -*-
import sys,datetime
import os from meinr.spiders.meinr1 import Meinr1Spider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings process = CrawlerProcess(get_project_settings()) #这里获取spider里面的setting
process.crawl(Meinr1Spider) #使用crawl启动Meinr1Spider爬虫
12 process.start() #开始运行

这是spider的启动文件

scrapy爬取美女图片

文件格式就是这样

Git地址 :https://github.com/18370652038/meinr.git