Scrapy's pipelines.py: saving items to a file and to MongoDB

Posted: 2023-03-08 22:36:56

I. Saving data to a file

1. The pipelines.py file

    import json

    class TencentPipeline(object):

        def open_spider(self, spider):
            # open the output file once, when the spider starts
            if spider.name == 'hr_tencent':
                self.file = open('data.json', 'w')

        def process_item(self, item, spider):
            if spider.name == 'hr_tencent':
                data = dict(item)
                # data = json.dumps(data, ensure_ascii=False)  # use this variant to keep non-ASCII text readable
                data = json.dumps(data)
                self.file.write(data + ',\n')
            return item

        def close_spider(self, spider):
            # close the file when the spider finishes
            if spider.name == 'hr_tencent':
                self.file.close()
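The pipeline converts each item to a dict before serializing, so it works with any Item definition. For context, a minimal items.py might look like the sketch below; the field names are assumptions, since the original project's item definition isn't shown:

    import scrapy

    class TencentItem(scrapy.Item):
        # hypothetical fields for a job posting; adjust to whatever the spider actually scrapes
        position_name = scrapy.Field()
        position_link = scrapy.Field()
        publish_date = scrapy.Field()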

2. The settings.py file

    ITEM_PIPELINES = {
        'tencent.pipelines.TencentPipeline': 300,
    }
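The number 300 is the pipeline's priority; pipelines with lower numbers run first. With this setting in place, running the spider produces data.json in the directory you launch it from:

    scrapy crawl hr_tencent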

II. Saving data to MongoDB

1. The pipelines.py file

    from pymongo import MongoClient

    class Tencent1Pipeline(object):

        def open_spider(self, spider):
            # connect to the local MongoDB server when the spider starts
            if spider.name == 'hr_tencent1':
                self.client = MongoClient('127.0.0.1', 27017)
                # database 'tencent', collection 'tencent'
                self.tencent = self.client['tencent']['tencent']

        def process_item(self, item, spider):
            if spider.name == 'hr_tencent1':
                print(item)
                # Collection.insert() was removed in pymongo 4; use insert_one() instead
                self.tencent.insert_one(dict(item))
            return item

        def close_spider(self, spider):
            # close the connection when the spider finishes
            if spider.name == 'hr_tencent1':
                self.client.close()
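One optional refinement, not part of the original code: instead of hard-coding the host and port, you can read them from settings.py through Scrapy's from_crawler hook. A minimal sketch, assuming MONGO_HOST and MONGO_PORT are setting names you define yourself:

    from pymongo import MongoClient

    class Tencent1Pipeline(object):

        @classmethod
        def from_crawler(cls, crawler):
            # MONGO_HOST / MONGO_PORT are hypothetical keys you would add to settings.py
            pipeline = cls()
            pipeline.host = crawler.settings.get('MONGO_HOST', '127.0.0.1')
            pipeline.port = crawler.settings.getint('MONGO_PORT', 27017)
            return pipeline

        def open_spider(self, spider):
            if spider.name == 'hr_tencent1':
                self.client = MongoClient(self.host, self.port)
                self.tencent = self.client['tencent']['tencent']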

2. The settings.py file

    ITEM_PIPELINES = {
        'tencent.pipelines.Tencent1Pipeline': 299,
    }
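Note that 299 is lower than the file pipeline's 300, so if both entries are enabled the MongoDB pipeline runs first. To confirm the inserts worked, you can query the collection directly with pymongo; the database and collection names below match the pipeline above:

    from pymongo import MongoClient

    client = MongoClient('127.0.0.1', 27017)
    for doc in client['tencent']['tencent'].find().limit(5):
        print(doc)  # prints the first five stored items
    client.close()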