Using Meituan BBQ (烤肉) listings as an example, here are several ways to save the scraped data.
First method: CSV. Create a CSV file, write the header row with csv.DictWriter, then build a dict for each scraped record and write the dicts to the file one by one.
1 """ 2 爬取美团烤肉 3 """ 4 import pprint 5 import csv 6 import parsel 7 import requests 8 import json 9 10 f = open('美团烤肉.csv', mode='a', encoding='utf-8-sig', newline='') 11 csvWriter = csv.DictWriter(f, fieldnames=[ 12 '商铺id', 13 '商铺名称', 14 '烤肉类型', 15 '评论人数', 16 '平均消费', 17 '商铺评分', 18 '所在商圈', 19 '详情页', 20 ]) 21 csvWriter.writeheader() # 写入头 22 23 headers = { 24 'referer':'https://qz.meituan.com/', # 这个叫防盗链,也叫来路,没有这个可能不会返回正常的json数据 25 'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36', 26 } 27 28 29 for page in range(32, 620 + 1, 32): 30 # 原始请求url= https://apimobile.meituan.com/group/v4/poi/pcsearch/110?uuid=f7325d6be06f44019907.1639106132.1.0.0&userid=394536385&limit=32&offset=64&cateId=-1&q=烤肉&token=mKqO1rGk3adC-dG4fspmVCJj-bgAAAAAhA8AAPVPiOhDFB1UirWrXZHX_ZEM-6qsRE4yHPX1o2RbzI9csT0G-CikXFP8TPrDZj0EwQ 31 # url中?后面的都是参数,下面来构建参数 32 data = { 33 "uuid": "f7325d6be06f44019907.1639106132.1.0.0", 34 "userid": "394536385", 35 "limit": "32", 36 "offset": page, 37 "cateId": "-1", 38 "q": "烤肉", 39 "token": "6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA", 40 } 41 url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/110' 42 # 构建好了参数,也有url,下面带参数进行请求 43 response = requests.get(url=url, headers=headers, params=data) # 携带参数是params,而不是data 44 results = response.json()['data']['searchResult'] 45 pprint.pprint(results) 46 for item in results: 47 shopId = item['id'] # 店铺id,用于构建详情页 48 shopName = item['title'] # 店名 49 comment = item['comments'] # 评论数 50 commentScore = item['avgscore'] # 评分 51 averagePrice = item['avgprice'] # 均价 52 shopStyle = item['backCateName'] # 烤肉类型 53 areaName = item['areaname'] # 所在地区 54 detailPage = 'https://www.meituan.com/meishi/' + str(shopId) 55 56 # if detailPage: # 如果有详情页,就把电话和地址还有营业时间提取出来 57 # res = requests.get(url=detailPage, headers=headers) 58 # print(res.text) 59 # selector = parsel.Selector(res.text) # 用parsel解析 60 # lis = selector.css('.address') 61 # for li in lis: 62 # address = li.css('p:nth-child(1)::text').get() 63 # telephone = li.css('p:nth-child(2)::text').get() 64 # businessTime = li.css('p:nth-child(3)::text').get() 65 print(shopId, shopName, shopStyle, comment, averagePrice, commentScore, areaName, detailPage, sep=' | ') 66 dit = { 67 '商铺id': shopId, 68 '商铺名称': shopName, 69 '烤肉类型': shopStyle, 70 '评论人数':comment, 71 '平均消费': averagePrice, 72 '商铺评分': commentScore, 73 '所在商圈': areaName, 74 '详情页': detailPage, 75 } 76 csvWriter.writerow(dit) # 写入数据 77 f.close() # 关闭文档
Second method: Excel. Use openpyxl to save the data in .xlsx format. Create a workbook with openpyxl, add a worksheet to it, and write the header cells by row/column index. Then append each scraped record to the sheet as a new row.
```python
import random
import time

import openpyxl
import requests

wb = openpyxl.Workbook()       # create a new workbook
ws = wb.create_sheet(index=0)  # create a new worksheet at the first position

# write the header row; the column order must match the ws.append() call below
ws.cell(row=1, column=1, value='商铺id')    # row 1, column 1
ws.cell(row=1, column=2, value='商铺名称')  # row 1, column 2
ws.cell(row=1, column=3, value='评论人数')  # row 1, column 3
ws.cell(row=1, column=4, value='商铺评分')  # row 1, column 4
ws.cell(row=1, column=5, value='平均消费')  # row 1, column 5
ws.cell(row=1, column=6, value='烤肉类型')  # row 1, column 6
ws.cell(row=1, column=7, value='所在商圈')  # row 1, column 7
ws.cell(row=1, column=8, value='详情页')    # row 1, column 8

# scraping part
headers = {
    # the referer is an anti-hotlinking check; without it the API may not return normal JSON
    'referer': 'https://qz.meituan.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',
}
headersfordetailPage = {
    'cookie': '_lxsdk_cuid=17cbaa6a9d2c8-0f1eee37234c9f-57b193e-154ac4-17cbaa6a9d2c8; __mta=219068326.1635219057156.1635219057156.1635772807405.2; _hc.v=b6f867b0-8945-0d82-17de-27259d34392f.1635773211; uuid=f7325d6be06f44019907.1639106132.1.0.0; mtcdn=K; userTicket=WHLOPRBnKWZfuLWEQGzxygULdMBRwrgPKPrRCyDp; lsu=; ci=110; rvct=110,30; client-id=e9b575c1-7f97-4502-a668-9ab75fc077be; _lxsdk=17cbaa6a9d2c8-0f1eee37234c9f-57b193e-154ac4-17cbaa6a9d2c8; u=394536385; n=YWe432409862; lt=6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA; mt_c_token=6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA; token=6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA; token2=6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA; unc=YWe432409862; lat=24.916719; lng=118.658159; firstTime=1639118168406; _lxsdk_s=17da2faf221-f83-785-d22||5; _lxsdk_s=17da2faf221-f83-785-d22||5',
    'referer': 'https://qz.meituan.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',
}
for page in range(32, 640 + 1, 32):
    time.sleep(random.uniform(2, 5))  # random delay between requests
    data = {
        "uuid": "f7325d6be06f44019907.1639106132.1.0.0",
        "userid": "394536385",
        "limit": "32",
        "offset": page,
        "cateId": "-1",
        "q": "烤肉",
        "token": "6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA",
    }
    url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/110'
    # request the data
    response = requests.get(url=url, headers=headersfordetailPage, params=data)
    # pick out the part of the response we need
    results = response.json()['data']['searchResult']
    for item in results:
        shopId = item['id']               # shop id, used to build the detail-page URL
        shopName = item['title']          # shop name
        comment = item['comments']        # number of comments
        commentScore = item['avgscore']   # rating
        averagePrice = item['avgprice']   # average price
        shopStyle = item['backCateName']  # BBQ type
        areaName = item['areaname']       # district
        detailPage = 'https://www.meituan.com/meishi/' + str(shopId)

        print(shopId, shopName, comment, commentScore, averagePrice, shopStyle, areaName, detailPage, sep=" | ")
        ws.append([shopId, shopName, comment, commentScore, averagePrice, shopStyle, areaName, detailPage])  # append one row

wb.save('美团烤肉.xlsx')  # save the workbook to disk; without save() nothing is written
wb.close()
```
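To confirm the .xlsx file was written, it can be loaded back with openpyxl. A minimal sketch, assuming the file name passed to wb.save() above:

```python
import openpyxl

# reopen the saved workbook and print the header plus the first few data rows
wb = openpyxl.load_workbook('美团烤肉.xlsx')
ws = wb.active  # the sheet that was created at index=0
for row in ws.iter_rows(min_row=1, max_row=5, values_only=True):
    print(row)
```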
Third method: use pandas to save the data locally, either as a CSV file or as an .xlsx file.
```python
import random
import time

import pandas as pd
import requests

df = pd.DataFrame()  # accumulates every record; saved to disk at the end

headers = {
    # the referer is an anti-hotlinking check; without it the API may not return normal JSON
    'referer': 'https://qz.meituan.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',
}

for page in range(32, 640 + 1, 32):
    print(f'------------------------- scraping page {int(page / 32)} ------------------------------')
    time.sleep(random.uniform(2, 5))  # random delay between requests
    # request parameters
    params = {
        "uuid": "f7325d6be06f44019907.1639106132.1.0.0",
        "userid": "394536385",
        "limit": "32",
        "offset": page,
        "cateId": "-1",
        "q": "烤肉",
        "token": "6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA",
    }
    # request URL
    url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/110'
    # request the page with the parameters attached
    response = requests.get(url=url, headers=headers, params=params)
    results = response.json()['data']['searchResult']  # the list we need
    for item in results:
        shopId = item['id']               # shop id, used to build the detail-page URL
        shopName = item['title']          # shop name
        comment = item['comments']        # number of comments
        commentScore = item['avgscore']   # rating
        averagePrice = item['avgprice']   # average price
        shopStyle = item['backCateName']  # BBQ type
        areaName = item['areaname']       # district
        detailPage = 'https://www.meituan.com/meishi/' + str(shopId)
        print(shopId, shopName, comment, commentScore, averagePrice, shopStyle, areaName, detailPage, sep=" | ")
        row = pd.DataFrame({'商铺id': [shopId], '商铺名称': [shopName], '评论人数': [comment], '平均评分': [commentScore],
                            '平均价格': [averagePrice], '烤肉类型': [shopStyle], '商铺商圈': [areaName], '详情页': [detailPage]})
        df = pd.concat([df, row], ignore_index=True)  # append the one-row frame

# df.to_csv('美团烤肉pd.csv', encoding='utf-8-sig', index=False)  # save as CSV
df.to_excel('美团烤肉pd.xlsx', index=False)  # save as Excel; to_excel() takes no encoding argument
```
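Growing a DataFrame with pd.concat inside the loop copies the whole frame on every record. A common alternative is to collect plain dicts in a list and build the DataFrame once at the end. A minimal sketch; the single record in `results` is dummy data standing in for one parsed searchResult item:

```python
import pandas as pd

# dummy stand-in for one item of response.json()['data']['searchResult']
results = [{'id': 1, 'title': '某烤肉店', 'comments': 100, 'avgscore': 4.5,
            'avgprice': 80, 'backCateName': '烤肉', 'areaname': '某商圈'}]

rows = []  # collect one dict per shop instead of concatenating frame by frame
for item in results:
    rows.append({
        '商铺id': item['id'],
        '商铺名称': item['title'],
        '评论人数': item['comments'],
        '平均评分': item['avgscore'],
        '平均价格': item['avgprice'],
        '烤肉类型': item['backCateName'],
        '商铺商圈': item['areaname'],
        '详情页': 'https://www.meituan.com/meishi/' + str(item['id']),
    })
df = pd.DataFrame(rows)  # build the frame in a single pass
df.to_excel('美团烤肉pd.xlsx', index=False)
```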
Fourth method: save to a plain txt file, as sketched below.
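A minimal sketch of this approach, reusing the request loop from the scripts above and writing one " | "-separated line per shop; the output file name 美团烤肉.txt is an assumption:

```python
import random
import time

import requests

headers = {
    'referer': 'https://qz.meituan.com/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',
}
url = 'https://apimobile.meituan.com/group/v4/poi/pcsearch/110'

# open the txt file once (name is an assumption); each shop becomes one " | "-separated line
with open('美团烤肉.txt', mode='a', encoding='utf-8') as f:
    for page in range(32, 640 + 1, 32):
        time.sleep(random.uniform(2, 5))  # random delay between requests
        data = {
            "uuid": "f7325d6be06f44019907.1639106132.1.0.0",
            "userid": "394536385",
            "limit": "32",
            "offset": page,
            "cateId": "-1",
            "q": "烤肉",
            "token": "6B6ZBb0iW7hhzSUDy8orYUf8zkcAAAAAjw8AAO7yylpXqXjfF6LvKTWXZg71aQGghpKWdwRj-BZPHsuI_3L751BjyIzkzEwfRU2faA",
        }
        response = requests.get(url=url, headers=headers, params=data)
        results = response.json()['data']['searchResult']
        for item in results:
            detailPage = 'https://www.meituan.com/meishi/' + str(item['id'])
            line = ' | '.join(str(v) for v in [
                item['id'], item['title'], item['comments'], item['avgscore'],
                item['avgprice'], item['backCateName'], item['areaname'], detailPage,
            ])
            f.write(line + '\n')  # one shop per line
```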