Personal notes. If anything here is wrong, please point it out.
# spiders/doutu.py -- the spider (file name assumed to match the spider name)
import scrapy

from ..items import ImgscrItem


class ImgSpider(scrapy.Spider):
    name = 'doutu'
    allowed_domains = ['doutula.com']
    start_urls = ['https://www.doutula.com/photo/list/?']

    def parse(self, response, **kwargs):
        # each <a> inside the list container wraps one image
        lis = response.xpath('//div[@class="page-content text-center"]//a')
        for i in lis:
            # the real image URL is kept in the data-backup attribute
            ur = i.xpath('./img/@data-backup').extract_first()
            item = ImgscrItem()
            item['ur'] = ur
            yield item
# items.py
import scrapy


class ImgscrItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    ur = scrapy.Field()  # image URL collected by the spider
# pipelines.py
import scrapy
from scrapy.pipelines.images import ImagesPipeline


class ImgscrPipeline(ImagesPipeline):
    def get_media_requests(self, item, info):
        # issue a request for each image URL stored in the item
        yield scrapy.Request(item['ur'])

    def file_path(self, request, response=None, info=None, *, item=None):
        # use the last segment of the image URL (split on '/') as the file name
        imgname = request.url.split('/')[-1]
        return imgname

    def item_completed(self, results, item, info):
        # return the item so the next enabled pipeline can process it
        return item
In settings.py, replace the default pipeline entry with this custom pipeline class, and set the image save path; create that folder beforehand.
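A minimal settings.py sketch of those two settings. The module path 'imgscr.pipelines' (inferred from the ImgscrItem/ImgscrPipeline names) and the folder './images' are assumptions; adjust them to your own project.

# settings.py -- minimal sketch, paths are illustrative assumptions
ITEM_PIPELINES = {
    'imgscr.pipelines.ImgscrPipeline': 300,  # enable the custom image pipeline
}
IMAGES_STORE = './images'  # images are saved here; create the folder first

Since file_path() returns only the file name, images land directly inside IMAGES_STORE. Note that ImagesPipeline needs Pillow installed (pip install pillow); after that, run the crawl with: scrapy crawl doutu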