一直对爬虫这块蛮感兴趣的,所以花了点时间看了看,写了个小脚本
代码可能有点乱,毕竟Python小白,勿喷……
嗯,话不多说,放码出来
# -*- coding: UTF-8 -*-
"""Scrape proxy ip:port entries from xicidaili.com and append them to ip.txt."""
import re
import requests

# Pretend to be a browser so the site does not reject the request outright.
headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
url = "http://www.xicidaili.com/nn/"
# timeout keeps the script from hanging forever on a slow/dead server
context = requests.get(url, headers=headers, timeout=10)

# Matches one table row worth of data: ip, port, location anchor, anonymity, protocol.
pattern = re.compile(r"<td>\d+\.\d+\.\d+\.\d+</td>\s+<td>\d+</td>\s+<td>\s+<.*?</a>\s+</td>\s+<.*?</td>\s+<td>[A-Z]{2,6}</td>")
# After the replace() cleanup below an entry looks like:
#   123.135.62.217:8118::<ahref="/2018-01-24/shandong">山东泰安</a>::<tdclass="country">高匿:HTTPS
# This pattern strips the location/anonymity middle chunk, leaving "ip:port__PROTOCOL".
pat = re.compile(r'::<.*?::<.*?:')

content = pattern.findall(context.text)
# Open the output file once instead of re-opening it for every single entry.
with open("ip.txt", "a") as out:
    for item in content:
        # Collapse the HTML row into one colon-separated line, then drop the middle columns.
        item = item.replace("<td>", "").replace("</td>", "").replace("\n", ":").replace(" ", "")
        item = pat.sub("__", item)
        out.write(item + "\n")
# Validate each scraped proxy by fetching further listing pages through it.
i = 0  # index into the scraped ip list (one request attempt per increment)
j = 1  # page number to fetch next
# x counts how many times the current ip has been reused.  An ip is only saved
# as "good" (and retired) after surviving several requests, so a single ip is
# neither hammered forever nor thrown away after one use.
x = 0

# Read the whole ip list up front; `with` guarantees the handle is closed
# (the original left the file open for the script's lifetime).
with open("ip.txt") as ip_file:
    lines = ip_file.readlines()

while i < len(lines):
    url = "http://www.xicidaili.com/nn/" + str(j)
    entry = lines[i].replace("\n", "")
    # Saved lines end in "__HTTPS" or "__HTTP"; pick the proxy scheme accordingly.
    # Check HTTPS first since "HTTP" is a substring of "HTTPS".
    if re.findall("HTTPS", entry):
        ip = "https://" + entry.replace("__HTTPS", "")
        proxies = {"https": ip}
    elif re.findall("HTTP", entry):
        ip = "http://" + entry.replace("__HTTP", "")
        proxies = {"http": ip}
    else:
        print("代理ip获取错误...")
        exit()

    # Probe the proxy.  Catch only request-level failures (connect errors,
    # timeouts, ...) instead of a bare except that would also swallow
    # KeyboardInterrupt; timeout keeps a dead proxy from hanging the loop.
    try:
        response = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    except requests.RequestException:
        print("第" + str(i) + "次失败")
        i = i + 1
    else:
        context = pattern.findall(response.text)
        # The ip survived enough requests: record it in ip_pass.txt and
        # move on to the next one.
        if x > 8:
            # Named `passed` (not `f`) so it cannot shadow any outer handle.
            with open("ip_pass.txt", "a") as passed:
                passed.write(lines[i])
            i = i + 1
            x = 1
            print("第" + str(i) + "次成功")
            print(".")
            print(".")
            print(".")
        else:
            x = x + 1
        # Save this page's rows regardless, using the same cleanup as page one.
        with open("ips.txt", "a") as out:
            for item in context:
                item = item.replace("<td>", "").replace("</td>", "").replace("\n", ":").replace(" ", "")
                item = pat.sub("__", item)
                out.write(item + "\n")
        print("第" + str(j) + "页爬取成功")
        j = j + 1
print("success")