Python Web Scraping - Chapter 4-1 - Concurrent Crawling with Multithreading and Multiprocessing III - Scraping Wholesale Price Data from a Produce Market

Posted: 2023-01-15 21:58:42
# Demo Description: scrape produce price data from the Beijing Xinfadi wholesale market

import requests
import csv
from concurrent.futures import ThreadPoolExecutor
from fake_useragent import UserAgent

'''
This chapter:
A case study of crawling paginated data with a thread pool.
Target site: http://www.xinfadi.com.cn/priceDetail.html
'''


# start------------------------------
# Reptile - worker: fetch one page of price data and append it to the CSV file
def dataReptile(currentPage):
    url = 'http://www.xinfadi.com.cn/getPriceData.html'
    data = {
        'limit': '20',
        'current': currentPage,  # page number (pagination)
        'pubDateStartTime': '',
        'pubDateEndTime': '',
        'prodPcatid': '',
        'prodCatid': '',
        'prodName': '',
    }
    ua = UserAgent()
    user_agent = ua.random  # random User-Agent for each request
    headers = {'user-agent': user_agent}
    resp = requests.post(url, data=data, headers=headers)
    dataLst = resp.json()['list']
    # newline='' stops csv.writer from inserting blank lines on Windows
    with open('../FileForDemo/P4Demo_threadPoolCase.csv', mode='a+', newline='', encoding='UTF-8') as file:
        csvWriter = csv.writer(file)
        for i in dataLst:
            dataInfo = 'Product: ' + i['prodName'] + ', ' \
                       + 'Lowest price: ' + i['lowPrice'] + ', ' \
                       + 'Average price: ' + i['avgPrice'] + ', ' \
                       + 'Highest price: ' + i['highPrice'] + ', ' \
                       + 'Origin: ' + i['place'] + ', ' \
                       + 'Unit: ' + i['unitInfo'] + ', ' \
                       + 'Publish date: ' + i['pubDate']
            csvWriter.writerow([dataInfo])
    resp.close()
    print(f'Page {currentPage} downloaded!')


if __name__ == '__main__':
    # 50 worker threads; pages 1-499 are submitted as independent tasks
    with ThreadPoolExecutor(50) as t:
        for i in range(1, 500):
            t.submit(dataReptile, i)
    # the with-block waits for every task to finish before this line runs
    print('All data downloaded!!!')

# end------------------------------
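
A note on the thread pool above: submit() returns a concurrent.futures.Future, and an exception raised inside dataReptile is stored on that Future instead of being printed, so a failed page can go unnoticed. Below is a minimal sketch that keeps the futures and surfaces errors with as_completed; it uses the same worker and page range as the demo, so it could replace the __main__ block directly.

from concurrent.futures import ThreadPoolExecutor, as_completed

if __name__ == '__main__':
    with ThreadPoolExecutor(50) as t:
        # keep a handle on every submitted task, mapped to its page number
        futures = {t.submit(dataReptile, page): page for page in range(1, 500)}
        for future in as_completed(futures):
            page = futures[future]
            try:
                future.result()  # re-raises any exception from the worker thread
            except Exception as exc:
                print(f'Page {page} failed: {exc}')
    print('All data downloaded!!!')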
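
Another thing worth pointing out: every worker appends to the same CSV file through its own file handle, so rows from different pages can interleave, and each row is one concatenated string rather than separate columns. The sketch below shows one alternative: a single shared csv.writer guarded by a threading.Lock, writing one column per field. The output filename, the 10-thread pool size, and the request timeout are illustrative choices, not part of the original demo.

import csv
import threading
import requests
from concurrent.futures import ThreadPoolExecutor

write_lock = threading.Lock()  # serializes access to the shared csv.writer

def fetch_page(currentPage, csvWriter):
    url = 'http://www.xinfadi.com.cn/getPriceData.html'
    data = {'limit': '20', 'current': currentPage}
    resp = requests.post(url, data=data, timeout=10)
    rows = resp.json().get('list', [])
    resp.close()
    with write_lock:  # only one thread writes at a time, so rows never interleave
        for i in rows:
            csvWriter.writerow([i['prodName'], i['lowPrice'], i['avgPrice'],
                                i['highPrice'], i['place'], i['unitInfo'], i['pubDate']])
    print(f'Page {currentPage} downloaded!')

if __name__ == '__main__':
    # illustrative output path; adjust as needed
    with open('../FileForDemo/P4Demo_threadPoolCase_columns.csv',
              mode='w', newline='', encoding='UTF-8') as file:
        csvWriter = csv.writer(file)
        csvWriter.writerow(['prodName', 'lowPrice', 'avgPrice', 'highPrice',
                            'place', 'unitInfo', 'pubDate'])  # header row
        with ThreadPoolExecutor(10) as pool:
            for page in range(1, 500):
                pool.submit(fetch_page, page, csvWriter)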