Python 3.6: a simple web crawler and image downloader with urllib.request

#Changelog:
#0418  crawl product URLs from listing pages
#0421  update: added downloading of images found on pages
#0423  update: added mail sending
#      tuning: crawler exception handling; error-page and empty-page handling
#      tuning: crawler keyword blacklist/whitelist to improve efficiency

################################################################# 
#author: 陈月白
#_blogs: http://www.cnblogs.com/chenyuebai/
#################################################################

# -*- coding: utf-8 -*-
import urllib.request
import sys
import traceback
import re
import socket
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr

socket.setdefaulttimeout(15.0)

class CRAWLER():
    # initialize instance variables
    def __init__(self):
        self.pageIndex = 0
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        self.headers = {'User-Agent': self.user_agent}
    # fetch a page's HTML given its URL
    def get_page(self, url, black_keyword_list=[], must_keyword_list=[]):
        try:
            page_data = ""
            request = urllib.request.Request(url, headers=self.headers)
            response = urllib.request.urlopen(request, timeout=10)
            page_data = response.read().decode()
            #print("####################\n", page_data)
        except:
            print("get_page %s caught an error, now return" % url)
            #traceback.print_exc()
            return page_data

        # blacklist keywords: if any occurs in page_data, treat the page as empty
        if black_keyword_list:
            for black_keyword in black_keyword_list:
                if black_keyword in page_data:
                    print("black_keyword =", black_keyword)
                    page_data = ""
                    return page_data

        # required keywords: if any is missing from page_data, treat the page as empty
        if must_keyword_list:
            for must_keyword in must_keyword_list:
                if must_keyword not in page_data:
                    print("must_keyword: [%s] is not in page_data, treating page_data as empty!" % must_keyword)
                    page_data = ""
                    return page_data

        if page_data == "":
            print("EXEC:get_page(%s) failed, page_data is empty..." % url)
        else:
            print("EXEC:get_page(%s) success!" % url)
        return page_data
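
    # Aside: the `[]` defaults above are evaluated once and shared between
    # calls (Python's mutable-default-argument pitfall). get_page never
    # mutates them, so it is harmless here; a defensive spelling would be
    # `black_keyword_list=None` plus `black_keyword_list = black_keyword_list or []`.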
    # entry point: take a URL, fetch it, and select items via the regex in `flag`
    def select_items_from_url(self, url, flag, black_keyword_list=[], must_keyword_list=[]):
        print("url =", url)
        print("flag =", flag)
        page_data = self.get_page(url, black_keyword_list, must_keyword_list)
        #print("page_data =", page_data)
        if page_data == "":
            print("page_data =", page_data)
            return page_data

        pattern = re.compile(flag, re.S)
        print("pattern =", pattern)
        items = re.findall(pattern, page_data)
        #print("FUNC:select_items_from_url items =", items)
        # re.findall returns a list, so test emptiness instead of comparing to ""
        if not items:
            print("EXEC:select_items_from_url failed, selected items are empty")
        else:
            print("EXEC:select_items_from_url success!")
            #print("items =", items)
        return items

    def load_image_by_imageUrl(self, image_url, image_load_fullpath):
print("image_url =",image_url)
print("image_load_fullpath =",image_load_fullpath)
# try:
# urllib.request.urlretrieve(image_url,image_load_fullpath)
# except:
# print("CATCH AN ERROR FUNC:load_image_by_imageUrl %s failed..."%image_url)
# return
try:
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36')]
urllib.request.install_opener(opener) print("now start to load %s"%image_url)
urllib.request.urlretrieve(image_url,image_load_fullpath)
print("FUNC:load_image_by_imageUrl %s success!"%image_url)
except:
print("CATCH AN ERROR FUNC:load_image_by_imageUrl %s failed..."%image_url)
return def start(self):
pass def send_email(self,receive_user,topic_name,body_text):
send_user = "85********2@qq.com"
send_passwd = "********" try:
msg = MIMEText(body_text,'plain','utf-8') #邮件内容
msg['From'] = formataddr(["********",send_user]) #收件人邮箱昵称、收件人邮箱账号
msg['Subject'] = topic_name #邮件主题s server = smtplib.SMTP("smtp.qq.com",25)
server.set_debuglevel(1)
server.connect("smtp.qq.com")
server.ehlo()
server.starttls()
server.ehlo()
server.login(send_user,send_passwd) #登录
server.sendmail(send_user,receive_user,msg.as_string())
server.quit()
print("send mail to %s success!"%receive_user)
except:
print("send mail to %s failed!"%receive_user)


#main
def get_goods_image_from_suning():
    suning = CRAWLER()
    flag = '<img class="search-loading" width="220" height="220" src2="(.*?)"></a>'
    page_num = 5
    name_index = 0
    try:
        for i in range(page_num):
            items = suning.select_items_from_url('https://list.suning.com/0-20006-%s.html' % i, flag)
            #print(items)
            if not items:
                continue
            for item in items:
                #print(item)
                load_image_fullpath = r"E:\workSpace\TMP\%s.jpg" % name_index
                suning.load_image_by_imageUrl("http:%s" % item, load_image_fullpath)
                name_index = name_index + 1
                print("FUNC:get_goods_image_from_suning success! image load path is: %s" % load_image_fullpath)
    except:
        print("CATCH AN ERROR FUNC:get_goods_image_from_suning")
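
# urlretrieve() is documented as a legacy interface, and install_opener()
# changes process-wide state. A hedged per-request alternative; the function
# name and timeout value are illustrative, not part of the original script:
import shutil

def download_image(image_url, image_load_fullpath):
    request = urllib.request.Request(image_url, headers={'User-Agent': 'Mozilla/5.0'})
    # stream the response straight into the target file
    with urllib.request.urlopen(request, timeout=10) as response, \
         open(image_load_fullpath, 'wb') as f:
        shutil.copyfileobj(response, f)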
#main
def get_adu_image_from_9():
    socket.setdefaulttimeout(5.0)
    adu = CRAWLER()

    flag = '<img src=".*?" file="(.*?)".*?onmouseover=.*?alt=.*?/>'
    page_index = 171186    #160202 #150007
    # error-page markers ("no permission for this operation", "sorry, page not found")
    black_keyword_list = ["您无权进行当前操作,原因如下", "对不起,找不到页面!<br>"]
    must_keyword_list = ["attachments"]

    image_download_num = 0
    while True:
        try:
            print("------------------------------------------------------------------------")
            items = adu.select_items_from_url('http://********tid=%s' % page_index, flag, black_keyword_list, must_keyword_list)
            page_index = page_index + 1
            #print(items)
            if not items:
                print("")
                continue
            for item in items:
                #print("tettt_item =", item)
                image_name = item.split("/")[1]
                print("image_name =", image_name)
                load_image_fullpath = r"E:\workSpace\TMP\ad_image\%s_%s" % (page_index - 1, image_name)
                adu.load_image_by_imageUrl("http://********/%s" % item, load_image_fullpath)
                image_download_num = image_download_num + 1
                print("FUNC:get_adu_image_from_9 success! image load path is: %s" % load_image_fullpath)
            print("image_download_num now is %s \n" % image_download_num)
        except:
            print("CATCH AN ERROR FUNC:get_adu_image_from_9\n")
def love_letter():
    love_ch = CRAWLER()
    love_ch.send_email(["50********9@qq.com", "46********@qq.com"], "LOVE MAIL 05", "我爱你!")   # body text: "I love you!"

############################################################################
def main():
    #get_adu_image_from_9()
    love_letter()

main()
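
# For completeness, a sketch of driving the keyword filters directly; the URL
# and keywords here are placeholders, not endpoints from the original post:
def demo_keyword_filtering():
    crawler = CRAWLER()
    page = crawler.get_page("http://example.com/thread?tid=1",
                            black_keyword_list=["page not found"],
                            must_keyword_list=["attachments"])
    print("kept" if page else "filtered out")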

#Execution results

1. Crawling images (about one hour of runtime; throughput is passable):

(screenshot)

2. Sending mail:

(screenshot)
