Crawler frameworks usually scrape Ajax-driven dynamic pages by handing the page to a third-party WebKit library, executing the JavaScript in the HTML, and passing the rendered HTML to the spider for parsing. This article takes a different route: capture the traffic with Fiddler, inspect the Ajax requests that fetch the JSON data, find the interface URL behind them, and call that interface directly. This spares us the trouble of pulling in a Python WebKit binding, and since Ajax responses are usually structured data, it also spares us the pain of parsing HTML with XPath.
Open the Qiushibaike app on a phone and capture its traffic with Fiddler to get the JSON responses. Check whether the captured interface URL can be accessed normally; if it can, try it in another browser as well (see figure).
The JSON data returned by the URL is shown in the figure; the JSON-Handle extension (installable in Chrome) is recommended for viewing it.
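Before wiring the URL into a spider, it is worth confirming outside the app that the endpoint really serves JSON. A minimal sketch with requests, assuming the app's User-Agent string from the Fiddler capture (the same one used in settings.py below; the server may refuse ordinary browser UAs):

import json
import requests

url = "https://m2.qiushibaike.com/article/list/text?page=1"
headers = {"User-Agent": "qiushibalke_10.13.0_WIFI_auto_7"}

resp = requests.get(url, headers=headers, timeout=10)
data = resp.json()  # raises if the response is not JSON
print(resp.status_code)
print(json.dumps(data, ensure_ascii=False, indent=2)[:500])  # preview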
Code implementation, using 99 pages as an example:
items.py
import scrapy


class QiushibalkeItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    uid = scrapy.Field()
    nickname = scrapy.Field()
    gender = scrapy.Field()
    astrology = scrapy.Field()
    content = scrapy.Field()
    crawl_time = scrapy.Field()
spiders/qiushi.py
# -*- coding: utf-8 -*-
import scrapy
import json
from qiushibalke.items import QiushibalkeItem
from datetime import datetime
class QiushiSpider(scrapy.Spider):
    name = "qiushi"
    allowed_domains = ["m2.qiushibaike.com"]

    def start_requests(self):
        for i in range(1, 100):
            url = "https://m2.qiushibaike.com/article/list/text?page={}".format(i)
            yield scrapy.Request(url, callback=self.parse_item)

    def parse_item(self, response):
        # The endpoint returns JSON, so parse it directly instead of
        # running the response through an HTML selector.
        datas = json.loads(response.text)["items"]
        for data in datas:
            # print(data['votes']['up'])
            # print(data['user']['uid'])
            # print(data['user']["login"])
            # print(data['user']["gender"])
            # print(data['user']["astrology"])
            item = QiushibalkeItem()
            item["uid"] = data['user']["uid"]
            item["nickname"] = data['user']["login"]
            item["gender"] = data['user']["gender"]
            item["astrology"] = data['user']["astrology"]
            item["content"] = data["content"]
            item["crawl_time"] = datetime.now()
            yield item
pipelines.py
import pymysql


class QiushibalkePipeline(object):
    def process_item(self, item, spider):
        # Opens a new connection per item; simple, but inefficient for
        # large crawls.
        con = pymysql.connect(host="127.0.0.1", user="your_username",
                              passwd="your_password", db="qiushi",
                              charset="utf8")
        cur = con.cursor()
        sql = ("insert into baike(uid,nickname,gender,astrology,content,crawl_time)"
               " VALUES(%s,%s,%s,%s,%s,%s)")
        lis = (item["uid"], item["nickname"], item["gender"],
               item["astrology"], item["content"], item["crawl_time"])
        cur.execute(sql, lis)
        con.commit()
        cur.close()
        con.close()
        return item
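The pipeline assumes a qiushi database containing a baike table. A one-time setup sketch is below; the column types are my assumptions based on the fields being inserted, so adjust them to your needs.

# One-time schema setup sketch; column types are assumptions.
import pymysql

con = pymysql.connect(host="127.0.0.1", user="your_username",
                      passwd="your_password", charset="utf8")
cur = con.cursor()
cur.execute("CREATE DATABASE IF NOT EXISTS qiushi DEFAULT CHARACTER SET utf8")
cur.execute("""
    CREATE TABLE IF NOT EXISTS qiushi.baike (
        id INT AUTO_INCREMENT PRIMARY KEY,
        uid BIGINT,
        nickname VARCHAR(64),
        gender VARCHAR(16),
        astrology VARCHAR(16),
        content TEXT,
        crawl_time DATETIME
    ) DEFAULT CHARACTER SET utf8
""")
con.commit()
cur.close()
con.close()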
settings.py
BOT_NAME = 'qiushibalke'

SPIDER_MODULES = ['qiushibalke.spiders']
NEWSPIDER_MODULE = 'qiushibalke.spiders'

ROBOTSTXT_OBEY = False
DOWNLOAD_DELAY = 5
COOKIES_ENABLED = False

DEFAULT_REQUEST_HEADERS = {
    # Mimic the mobile app's User-Agent captured with Fiddler; the API
    # may refuse ordinary browser UAs.
    "User-Agent": "qiushibalke_10.13.0_WIFI_auto_7",
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    # 'Accept-Language': 'en',
}

ITEM_PIPELINES = {
    'qiushibalke.pipelines.QiushibalkePipeline': 300,
    # 'scrapy_redis.pipelines.RedisPipeline': 300,
}
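With everything in place, run the spider from the project root with scrapy crawl qiushi. Note that at DOWNLOAD_DELAY = 5 the 99 requests alone take roughly eight minutes.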
The scraped data is shown in the figure: