items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class DangdangItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    src = scrapy.Field()
    name = scrapy.Field()
    price = scrapy.Field()
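A scrapy.Item behaves like a dict with a fixed set of declared keys, which is why the pipelines below can read item['src'] and so on; assigning to an undeclared field raises a KeyError. A minimal sketch (the field values are made-up placeholders):

item = DangdangItem(src='http://example.com/cover.jpg', name='Some Book', price='25.00')
print(item['name'])       # dict-style access works
# item['author'] = 'x'    # would raise KeyError: 'author' is not a declared Field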
dang.py
import scrapy
from ..items import DangdangItem


class DangSpider(scrapy.Spider):
    name = 'dang'
    allowed_domains = ['category.dangdang.com']
    start_urls = ['http://category.dangdang.com/cp01.01.04.00.00.00.html']
    # The category pages follow a fixed URL pattern:
    # http://category.dangdang.com/cp01.01.04.00.00.00.html
    # http://category.dangdang.com/pg2-cp01.01.04.00.00.00.html
    # http://category.dangdang.com/pg3-cp01.01.04.00.00.00.html
    base_url = 'http://category.dangdang.com/pg'
    page = 1

    def parse(self, response):
        # XPaths for the three fields:
        # src   //ul[@id="component_59"]/li/a/img/@data-original
        # price //ul[@id="component_59"]/li/p[@class="price"]/span[1]/text()
        # name  //ul[@id="component_59"]/li/p[@class="name"]/a/text()
        li_list = response.xpath('//ul[@id="component_59"]/li')
        # Only the first cover is loaded eagerly via @src; the rest are
        # lazy-loaded via @data-original, so keep the first @src as a fallback.
        first_src = response.xpath('//ul[@id="component_59"]/li[@class="line1"]/a/img/@src').extract_first()
        for li in li_list:
            src = li.xpath('./a/img/@data-original').extract_first()
            if not src:
                src = first_src
            # Some <a> tags contain nested tags, so take string() of the
            # whole element instead of text() to get the complete title.
            name = li.xpath('./p[@class="name"]/a').xpath('string()').extract_first()
            price = li.xpath('./p[@class="price"]/span[1]/text()').extract_first()
            dang = DangdangItem(src=src, name=name, price=price)
            # Yield each item so the enabled pipelines receive it.
            yield dang
        if self.page < 80:
            self.page = self.page + 1
            url = self.base_url + str(self.page) + '-cp01.01.04.00.00.00.html'
            # Note: at one point this url printed but the callback never ran
            # to crawl the next page (see the note after this file).
            print(url)
            # Pass the method itself as the callback; do not call it
            # (no parentheses after self.parse).
            yield scrapy.Request(url=url, callback=self.parse)
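On the "url prints but the next page is never crawled" problem noted above: one common cause (an assumption here, not confirmed by the original) is Scrapy's offsite middleware silently dropping requests that do not match allowed_domains. A quick way to test that hypothesis is to bypass the filters for the pagination request:

# Hypothetical debugging variant: dont_filter=True bypasses both the
# duplicate filter and the offsite check, so the request always goes out.
yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

If pagination works with dont_filter=True, the fix is to correct allowed_domains rather than leave the flag on, since it also disables duplicate filtering.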
pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class DangdangPipeline:
    # Runs once when the spider opens: the pipeline opens the file once
    # and keeps it open for the whole crawl.
    def open_spider(self, spider):
        self.fp = open('dang.json', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # Reopening the file per item would also work, but is wasteful:
        # with open('dang.json', 'a', encoding='utf-8') as f:
        #     f.write(str(item))
        self.fp.write(str(item))
        return item

    # Runs once after the crawl finishes.
    def close_spider(self, spider):
        self.fp.close()
import urllib.request


class DangdangImagePipeline(object):
    def process_item(self, item, spider):
        url = item['src']
        print('========= downloading: {} ========='.format(item['name']))
        print(url)
        # Use the first 14 characters of the title as the file name.
        prefix = item['name'][0:14]
        filename = './img/' + prefix + '.jpg'
        urllib.request.urlretrieve(url=url, filename=filename)
        return item
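urlretrieve fails if the ./img directory does not exist, and book titles can contain characters that are illegal in file names. A defensive sketch of the download step (the sanitizing rule is my own choice, not from the original):

import os
import re
import urllib.request

def save_cover(url, name, out_dir='./img'):
    # Create the target directory on first use.
    os.makedirs(out_dir, exist_ok=True)
    # Replace characters that are unsafe in file names with underscores.
    prefix = re.sub(r'[\\/:*?"<>|\s]+', '_', name[:14])
    filename = os.path.join(out_dir, prefix + '.jpg')
    urllib.request.urlretrieve(url=url, filename=filename)
    return filename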
import pymysql
from scrapy.utils.project import get_project_settings


class DangMysqlPipeline(object):
    def open_spider(self, spider):
        # Read the DB_* values defined at the bottom of settings.py.
        settings = get_project_settings()
        # With pymysql, port must be an integer and charset must be
        # 'utf8' (no hyphen).
        self.conn = pymysql.Connect(host=settings['DB_HOST'],
                                    user=settings['DB_USER'],
                                    password=settings['DB_PASSWORD'],
                                    database=settings['DB_DATABASE'],
                                    port=settings['DB_PORT'],
                                    charset=settings['DB_CHARSET'])
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Use a parameterized query: formatting values straight into the
        # SQL string breaks on quotes in titles and invites SQL injection.
        sql = 'insert into dang(src, name, price) values (%s, %s, %s)'
        self.cursor.execute(sql, (item['src'], item['name'], item['price']))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # The "mysterious error" here was self.cursor().close(): cursor is
        # an object, not a method, so calling it raises a TypeError.
        self.cursor.close()
        self.conn.close()
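The pipeline assumes a dang table already exists. A possible schema, created once with pymysql (the column types and lengths are assumptions; the original does not show the DDL):

import pymysql

conn = pymysql.Connect(host='127.0.0.1', user='root', password='123456',
                       database='community', port=3306, charset='utf8')
with conn.cursor() as cursor:
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS dang (
            id INT AUTO_INCREMENT PRIMARY KEY,
            src VARCHAR(255),   -- cover image URL
            name VARCHAR(255),  -- book title
            price VARCHAR(32)   -- price kept as text, e.g. "25.00"
        ) DEFAULT CHARSET=utf8
    ''')
conn.commit()
conn.close()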
settings.py
# Scrapy settings for dangdang project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'dangdang'
SPIDER_MODULES = ['dangdang.spiders']
NEWSPIDER_MODULE = 'dangdang.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    'Referer': 'http://category.dangdang.com/',
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'dangdang.middlewares.DangdangDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'dangdang.pipelines.DangdangPipeline': 300,
    # 'dangdang.pipelines.DangMysqlPipeline': 300,
    # 'dangdang.pipelines.DangdangImagePipeline': 299,
}
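Pipelines run in ascending priority order, so the 299 on the image pipeline means it runs before the JSON and MySQL writes. To chain all three, enable them with distinct values; the exact numbers below are illustrative:

ITEM_PIPELINES = {
    'dangdang.pipelines.DangdangImagePipeline': 299,  # runs first (lowest value)
    'dangdang.pipelines.DangdangPipeline': 300,
    'dangdang.pipelines.DangMysqlPipeline': 301,      # runs last
}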
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = ‘scrapy.extensions.httpcache.FilesystemCacheStorage‘
# Custom MySQL settings read by DangMysqlPipeline (pymysql.Connect defaults
# are database=None, port=0, charset='')
DB_HOST = '127.0.0.1'
DB_USER = 'root'
DB_PASSWORD = '123456'
DB_DATABASE = 'community'
DB_PORT = 3306
DB_CHARSET = 'utf8'
This project uses Scrapy to crawl the src, name, and price of every book in Dangdang's suspense category, download the cover images, and load the records into MySQL.
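With the files above in place, the crawl is started from the project root with the standard Scrapy command (the spider name 'dang' comes from the name attribute in dang.py):

scrapy crawl dang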