nodejs写的一个网页爬虫例子(坏链率)

因为工作需要,用nodejs写了个简单的爬虫例子,之前也没用过nodejs,连搭环境加写大概用了5天左右,so。。。要多简陋有多简陋,放这里给以后的自己看~~

整体需求是:给一个有效的URL地址,返回该网页上所有无效链接的百分比(坏链率)

第一个文件:计算坏链率 urlSpider.js

 /*================================================
@author MissUU
链接抓取思路: 1. 获取页面内容
2. 正则取得所有<a>
3. 进一步取得href属性值,如果首位是引号(")则剔除;不是http开头的加上域名(javascript开头除外)
4.正则验证是否是常见URL格式
================================================*/
var http = require('http');
var async = require('async');
var dbHandle = require('./dbHandle.js'); //主程序
/**
 * Crawls one site record ({id, url}): downloads the page, then hands the
 * HTML to matchURL() which checks every link and stores the stats.
 *
 * @param {Object} obj      row from dbHandle.showUrls(): {id, url}
 * @param {Function} callback async.eachSeries continuation — must run exactly once
 */
var runUrlSpider = function (obj, callback) {
    var urlBadLink = new UrlBadLink();
    var html = '';
    var finished = false;

    // Fix: 'error' can still fire after 'end' (or a timeout abort can race a
    // late response), so guard against invoking async's callback twice.
    var done = function () {
        if (!finished) {
            finished = true;
            callback();
        }
    };

    // 10s timeout: some servers never respond, and without this the whole
    // crawl would stall (see the author's note at the end of the post).
    var request_timer = setTimeout(function () {
        req.abort(); // abort() emits 'error' below, which calls done()
        console.log('Request Timeout.');
    }, 10000);

    var req = http.get(obj.url, function (res) {
        clearTimeout(request_timer);
        res.setEncoding('utf8');
        res.on('data', function (chunk) {
            html += chunk;
        }).on('end', function () {
            console.log('*******开始提取有效链接地址******');
            console.log(new Date().toLocaleString());
            console.log(obj.url);
            urlBadLink.host = obj.url;
            urlBadLink.id = obj.id;
            matchURL(html, urlBadLink, done);
        });
    });

    req.on('error', function (e) {
        clearTimeout(request_timer); // fix: the timer was never cleared on error
        console.log('problem with request: ' + e.message);
        done();
    });
}; //this is the entrance of code
/**
 * Entry point: loads the site list for group 1 and crawls the sites one at
 * a time (eachSeries keeps at most one page download in flight).
 */
var main = function () {
    // dbHandle.showUrls delivers its rows only through the callback; the old
    // `var urlArray = dbHandle.showUrls(...)` assignment was dead code.
    dbHandle.showUrls(1, function (result) {
        async.eachSeries(result, runUrlSpider, function (err) {
            console.log('******this is the end, haha*******');
        });
    });
};

main();

/*
 * Checks every link extracted from one page and stores the bad-link stats.
 *
 * @param {string} content       raw page HTML
 * @param {UrlBadLink} urlBadLink stats accumulator; host and id already set
 * @param {Function} callend     invoked once the stats have been stored
 */
function matchURL(content, urlBadLink, callend) {
    var host = urlBadLink.host;
    var anchor = /<a\s[^>]*>/g;
    var matches = content.match(anchor); // null when the page has no <a> tags
    var badLink = 0; // dead links counted so far
    var flag = 0;    // request counter, used only in log output

    // Fetches one url; counts it bad on request error, timeout, or a status
    // outside 200-399.
    var HttpGet = function (url, callback) {
        var called = false;
        // Fix: 'error' and 'end' could both fire for one request; make sure
        // async's per-item callback runs exactly once.
        var done = function () {
            if (!called) {
                called = true;
                callback();
            }
        };

        // 10s timeout so one unresponsive server cannot stall the crawl.
        var request_timer = setTimeout(function () {
            req.abort(); // emits 'error' below
            console.log('Request Timeout.');
        }, 10000);

        var req = http.get(url, function (res) {
            clearTimeout(request_timer);
            res.on('data', function () {
                // drain the body so 'end' is emitted
            }).on('end', function () {
                console.log(++flag + ": " + url + ' response status: ' + res.statusCode);
                if (!(res.statusCode >= 200 && res.statusCode < 400)) {
                    console.log('-----------------------');
                    badLink++;
                }
                done();
            });
        });

        req.on('error', function (err) {
            clearTimeout(request_timer); // fix: timer was never cleared on error
            console.log(++flag + ": " + 'problem with request: ' + err.message);
            badLink++;
            done();
        });
    };

    var urls = filterUrl(matches, host);
    // Fix: an empty array (page with anchors but no valid urls) used to fall
    // into the first branch and store 0/0, which getRate() rendered as
    // "NaN%"; treat it exactly like "no links found".
    if (urls !== null && urls.length > 0) {
        async.eachSeries(urls, HttpGet, function (err) {
            urlBadLink.total = urls.length;
            urlBadLink.badCounts = badLink;
            dbHandle.updateBadLink(urlBadLink);
            callend();
        });
    } else {
        console.log('no links found');
        // Arbitrary non-zero denominator so getRate() yields "0.00%".
        urlBadLink.total = 10;
        urlBadLink.badCounts = 0;
        dbHandle.updateBadLink(urlBadLink);
        callend();
    }
}
// Regex-extracts the href attribute value from one anchor tag.
/**
 * Normalizes the href of one anchor tag.
 *
 * @param {string} strUrl raw anchor markup, e.g. '<a href="/x">'
 * @param {string} host   prefix prepended to relative urls
 * @returns {?string} normalized url, or null when no href= is present
 */
function URLFommat(strUrl, host) {
    var urlPatten = /href=[\'\"]?([^\'\"]*)[\'\"]?/i;
    var temp = urlPatten.exec(strUrl);
    if (temp === null) {
        return null;
    }
    // Fix: use the capture group instead of temp[0].substring(6, length-1),
    // which clipped one character off each end of unquoted href values.
    // The capture class [^'"]* can never contain a quote, so the old
    // indexOf('"') strip branch is no longer needed.
    var url = temp[1].trim();
    if (url.charAt(0) == "/") {
        // Leading slash is dropped; the caller's host is expected to carry
        // its own trailing separator (same contract as the original code).
        return host + url.slice(1);
    } else if (url.indexOf("http") == -1 &&
               url.indexOf("javascript") == -1) {
        // Relative url: prefix the host. javascript: pseudo-urls pass
        // through untouched (IsURL rejects them later).
        return host + url;
    }
    return url;
}
//test URLFommat
//var test = "http://baidu.com";
// var test1 = " \"http://baidu.com";
//var test2 = "/wenhao";
//console.log(URLFommat(test,"www.sina.com.cn"));
//console.log(URLFommat(test1,"www.sina.com.cn"));
//console.log(URLFommat(test2,"www.sina.com.cn")); //测试是否为常见url格式
/**
 * Validates that a string looks like a common URL (optional scheme,
 * hostname with a known TLD or two-letter country code, optional path).
 *
 * @param {?string} strUrl candidate url (may be null)
 * @returns {boolean}
 */
function IsURL(strUrl) {
    if (strUrl == null) {
        return false;
    }
    // Fix: the scheme alternation used to read (http?|ftp) — "http?" means
    // "htt" plus an optional "p", so every https:// link was rejected.
    var regular = /^\b(((https?|ftp):\/\/)?[-a-z0-9]+(\.[-a-z0-9]+)*\.(?:com|edu|gov|int|mil|net|org|biz|info|name|museum|asia|coop|aero|[a-z][a-z]|((25[0-5])|(2[0-4]\d)|(1\d\d)|([1-9]\d)|\d))\b(\/[-a-z0-9_:\@&?=+,.!\/~%\$]*)?)$/i;
    return regular.test(strUrl);
}
/**
 * Stats object for one crawled site.
 *
 * @param {number} id        site id (t_site.id)
 * @param {string} host      site url
 * @param {number} total     number of links checked
 * @param {number} badCounts number of broken links
 */
function UrlBadLink(id, host, total, badCounts) {
    this.id = id;
    this.host = host;
    this.total = total;
    this.badCounts = badCounts;
}

// Defined once on the prototype. The original attached this inside the
// constructor behind a typeof check — redundant, since prototype methods
// are shared across instances anyway.
UrlBadLink.prototype.getRate = function () {
    // Fix: with total == 0 (or unset) the old code computed 0/0 → "NaN%".
    if (!this.total) {
        return '0.00%';
    }
    return Number(Math.round(this.badCounts / this.total * 10000) / 100).toFixed(2) + '%';
};

/**
 * Keeps only the anchors that normalize to a syntactically valid URL.
 *
 * @param {?Array<string>} arr anchor tags from content.match(), or null
 * @param {string} host        prefix for relative urls
 * @returns {?Array<string>} valid urls, or null when arr is null
 */
function filterUrl(arr, host) {
    if (arr === null) {
        return null;
    }
    var output = [];
    arr.forEach(function (item) {
        var formatURL = URLFommat(item, host);
        if (IsURL(formatURL)) {
            output.push(formatURL);
        }
    });
    return output;
}

第二个文件:将数据存库,dbHandle.js

/**
 * @author MissUU
 * @des Basic MySQL operations
 * API: https://github.com/felixge/node-mysql
 */
var mysql = require('mysql');

// All queries go through this pool. (A stray mysql.createConnection(...)
// call used to sit here; its return value was discarded and the connection
// never used or released, so it has been removed.)
var pool = mysql.createPool({
    host : '10.102.1.00',
    user : 'root',
    password : 'root',
    database : 'test',
    connectionLimit: 15
});
// read urls
exports.showUrls = function (groupId, callback){ console.log('this is showUrl()');
pool.getConnection(function(err, conn){ if (err) {
console.log("connection error!");
console.log(err);
} conn.query('SELECT id,realurl as url FROM t_site WHERE siteGroupId = ?',groupId, function(err, result){
if(err){
console.log(err.message);
} conn.release();
if(result.length){
// console.log(result instanceof Array);
callback(result);
return true;
}else{
callback('');
return false;
}
});
});
}; exports.updateBadLink = function (urlBadLink){
//若不含数据则不插入
if (!!urlBadLink) { pool.getConnection(function(err, conn){ if (err) {
console.log("connection error!");
console.log(err);
} var updateSql = "UPDATE a_qualityinfo SET brokenRate = '"+ urlBadLink.getRate() +"' WHERE siteId = " + urlBadLink.id; console.log(updateSql); conn.query(updateSql, function(err, result){
if(err){
console.log(err.message);
console.log('update fail');
} conn.release();
console.log('update success');
});// conn.query
});//pool.getConnection
}
};

代码后期还会改动,这里有几点需要注意的:

1、http.get有时会一直等待响应,所以一定要判断下,超时则认为出错,要不程序就卡住了。。。= =!

2、注意callback的使用,要不然很难规范执行顺序的,用过nodejs的都懂得。。。

上一篇:Android Studio查找功能(搜索功能)及快捷键


下一篇:宏定义中使用extern