Scrapy: crawling Kuaidaili free proxies and saving them to MongoDB

Let's start by analyzing the page.

The site's page URLs follow a very simple pattern:
https://www.kuaidaili.com/free/inha/1
https://www.kuaidaili.com/free/inha/2
Pagination just increments that trailing page number, and the XPath extraction rules are equally straightforward.
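Before writing the spider, the XPath rules can be sanity-checked interactively in scrapy shell (a minimal sketch; the selectors are the same ones used in parse() below):

scrapy shell "https://www.kuaidaili.com/free/inha/1"
# inside the shell prompt:
response.xpath("//table[@class='table table-bordered table-striped']/tbody/tr")   # one selector per table row
response.xpath("//td[@data-title='IP']/text()").get()                             # first IP on the page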
Now for the code.
items.py

import scrapy


class url(scrapy.Item):
    # one field per column scraped from the proxy table
    ip = scrapy.Field()
    port = scrapy.Field()
    name = scrapy.Field()    # location of the proxy
    time = scrapy.Field()    # response speed
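Note that a scrapy.Item behaves much like a dict, which is why the pipeline below can simply convert it with dict(item) before inserting it into MongoDB.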

The main spider logic:

# -*- coding: utf-8 -*-
import scrapy
from tutorial.items import url


class IpSpider(scrapy.Spider):
    name = 'ip'
    # allowed_domains must hold bare domains, not URL paths
    allowed_domains = ['www.kuaidaili.com']

    def start_requests(self):
        # one request per listing page (pages 1-19)
        base = 'https://www.kuaidaili.com/free/inha/'
        for i in range(1, 20):
            yield scrapy.Request(base + str(i), callback=self.parse)

    def parse(self, response):
        # each table row is one proxy entry
        for line in response.xpath("//table[@class='table table-bordered table-striped']/tbody/tr"):
            item = url()   # create a fresh item for every row
            item['ip'] = line.xpath(".//td[@data-title='IP']/text()").get()
            item['port'] = line.xpath(".//td[@data-title='PORT']/text()").get()
            item['name'] = line.xpath(".//td[@data-title='位置']/text()").get()      # location
            item['time'] = line.xpath(".//td[@data-title='响应速度']/text()").get()  # response speed
            yield item
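With items.py and the spider in place, the crawl is started from the project root in the usual way (ip is the name attribute defined above):

scrapy crawl ip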

Saving to MongoDB (pipelines.py):

import pymongo
import tutorial.settings


class TutorialPipeline(object):
    def __init__(self):
        host = tutorial.settings.MONGODB_HOST
        port = tutorial.settings.MONGODB_PORT
        dbname = tutorial.settings.MONGODB_DBNAME
        sheetname = tutorial.settings.MONGODB_SHEETNAME
        # create the MongoDB client
        client = pymongo.MongoClient(host=host, port=port)
        # select the database
        mydb = client[dbname]
        # the collection that will hold the scraped proxies
        self.post = mydb[sheetname]

    def process_item(self, item, spider):
        data = dict(item)
        # insert() is deprecated in pymongo 3 and removed in 4; use insert_one()
        self.post.insert_one(data)
        print("insert succeeded")
        return item
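Importing the settings module directly works, but Scrapy's own convention is to receive settings through from_crawler. A minimal equivalent sketch of the same pipeline written that way (the setting names are the ones defined below):

import pymongo

class TutorialPipeline(object):
    def __init__(self, host, port, dbname, sheetname):
        client = pymongo.MongoClient(host=host, port=port)
        self.post = client[dbname][sheetname]

    @classmethod
    def from_crawler(cls, crawler):
        # read the connection parameters from the project settings
        s = crawler.settings
        return cls(s.get('MONGODB_HOST'), s.getint('MONGODB_PORT'),
                   s.get('MONGODB_DBNAME'), s.get('MONGODB_SHEETNAME'))

    def process_item(self, item, spider):
        self.post.insert_one(dict(item))
        return item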

settings.py:

# -*- coding: utf-8 -*-

# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'tutorial'

SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# MongoDB host (redacted)
MONGODB_HOST = "47.××.**.**"
# MongoDB port
MONGODB_PORT = 27017
# database name
MONGODB_DBNAME = "ip"
# collection that stores the data
MONGODB_SHEETNAME = "agent"

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
#USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
# Obey robots.txt rules
# this is a personal crawler, so robots.txt is deliberately ignored
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 4    # reduce the number of concurrent requests

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs

# download delay in seconds
DOWNLOAD_DELAY = 3

# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'tutorial.middlewares.TutorialSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'tutorial.middlewares.TutorialDownloaderMiddleware': 100,
}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'tutorial.pipelines.TutorialPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay (note: the AUTOTHROTTLE_* values below take
# effect only when AUTOTHROTTLE_ENABLED is set to True above)
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# enable the local HTTP cache
HTTPCACHE_ENABLED = True
HTTPCACHE_EXPIRATION_SECS = 1
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
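After a crawl, the stored documents can be spot-checked with a few lines of pymongo (a sketch; replace the redacted host with the real MONGODB_HOST value):

import pymongo

client = pymongo.MongoClient(host="47.××.**.**", port=27017)
collection = client["ip"]["agent"]          # MONGODB_DBNAME / MONGODB_SHEETNAME
print(collection.count_documents({}))       # how many proxies were stored
print(collection.find_one())                # one sample document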
