# middlewares.py
class MiddlewareDownloaderMiddleware:
    """Scrapy downloader middleware that intercepts every request,
    response, and exception flowing through the downloader.

    Each hook prints a marker line (tutorial-style) and then follows the
    standard Scrapy return-value contract.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Used by Scrapy to create the middleware instance and hook
        # the spider_opened signal to our handler.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def spider_opened(self, spider):
        # Signal handler referenced in from_crawler. The original code
        # connected this name without defining it, which would raise
        # AttributeError as soon as the crawler built the middleware.
        print('i am spider_opened ')

    def process_request(self, request, spider):
        # spider is the running spider instance (e.g. spider.name).
        # Intercepts every outgoing request, normal or abnormal.
        # Request headers can be read or modified here, e.g.:
        # request.headers['Cookie'] = 'xxx'
        print('i am process_request ')
        # None tells Scrapy to continue processing this request.
        return None

    def process_response(self, request, response, spider):
        # Intercepts every response object before it reaches the spider.
        print('i am process_response ')
        return response

    def process_exception(self, request, exception, spider):
        # Intercepts requests whose download raised an exception.
        # Fix the request (e.g. attach a proxy) and return it so
        # Scrapy reschedules and retries it:
        # request.meta['proxy'] = 'https://ip:port'
        print('i am process_exception ')
        return request
# settings.py — enable the middleware
# Belongs in the project's settings.py: registers the downloader middleware.
# The key is the full dotted path to the class; 'middleware' is presumably
# the Scrapy project package name — TODO confirm it matches this project.
# 543 is the conventional priority slot (lower numbers sit closer to the engine).
DOWNLOADER_MIDDLEWARES = {
'middleware.middlewares.MiddlewareDownloaderMiddleware': 543,
}