Face Detection with HTML5, WebSocket and OpenCV

I've been learning about face recognition lately and built a simple OpenCV application under MFC. Training needs a fair amount of data, though, and a Windows desktop program is ultimately inconvenient, so I decided to move to a client-server model: detection and recognition run on the server, while video capture and display live in the web page.

I found some material online and implemented simple face detection; face recognition only takes a little more code on top of this framework. The main reference was the following article:

http://www.open-open.com/home/space-361-do-blog-id-8960.html

Jetty version: jetty-9.2.17.v20160517

JavaCV version: 1.2
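For reference, the Maven coordinates I believe correspond to these versions (an assumption on my part; adjust to however you manage dependencies) are org.eclipse.jetty.websocket:websocket-server:9.2.17.v20160517 for the server side, and org.bytedeco:javacv:1.2 together with the matching org.bytedeco.javacpp-presets:opencv:3.1.0-1.2 artifact (plus its platform classifier for the native libraries).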

First, the HTML page, which handles:

  1. capturing and displaying the video (HTML5, WebRTC, JavaScript);
  2. sending video frames to the server over a WebSocket;
  3. receiving the image data returned by the server (with the face detection results drawn in) and displaying it.
<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<title>FaceDetect</title>
</head>
<body>
<div style="visibility:hidden; width:0; height:0;">
  <canvas id="canvas" width="320" height="240"></canvas>
</div>
<div>
  <video id="video" autoplay style="display: inline;"></video>
  <img id="target" style="display:inline;"/>
</div>
<script type="text/javascript">
var ws = new WebSocket("ws://127.0.0.1:2014/");
ws.binaryType = "arraybuffer";

ws.onopen = function() {
  ws.send("I'm client");
};

// Convert the binary PNG returned by the server into a base64 data URI and show it
ws.onmessage = function(evt) {
  var bytes = new Uint8Array(evt.data);
  var data = "";
  var len = bytes.byteLength;
  for (var i = 0; i < len; ++i) {
    data += String.fromCharCode(bytes[i]);
  }
  var img = document.getElementById("target");
  img.src = "data:image/png;base64," + window.btoa(data);
};

ws.onclose = function() {
  alert("Closed");
};

ws.onerror = function(err) {
  alert("Error: " + err);
};

var getUserMedia = (navigator.getUserMedia ||
                    navigator.webkitGetUserMedia ||
                    navigator.mozGetUserMedia ||
                    navigator.msGetUserMedia);

var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var ctx = canvas.getContext('2d');

getUserMedia.call(navigator, {
  video: true,
  audio: true
}, function(localMediaStream) {
  video.src = window.URL.createObjectURL(localMediaStream);
  video.onloadedmetadata = function(e) {
    console.log("Label: " + localMediaStream.label);
    console.log("AudioTracks", localMediaStream.getAudioTracks());
    console.log("VideoTracks", localMediaStream.getVideoTracks());
  };
}, function(e) {
  console.log('Reeeejected!', e);
});

// Convert a data URI (base64 or URL-encoded) into a Blob
function dataURItoBlob(dataURI) {
  var byteString;
  if (dataURI.split(',')[0].indexOf('base64') >= 0)
    byteString = atob(dataURI.split(',')[1]);
  else
    byteString = unescape(dataURI.split(',')[1]);

  // separate out the MIME component
  var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];

  // write the bytes of the string to a typed array
  var ia = new Uint8Array(byteString.length);
  for (var i = 0; i < byteString.length; i++) {
    ia[i] = byteString.charCodeAt(i);
  }
  return new Blob([ia], {type: mimeString});
}

// Grab a frame from the <video> every 250 ms and send it to the server as a JPEG blob
var timer = setInterval(function() {
  ctx.drawImage(video, 0, 0, 320, 240);
  var data = canvas.toDataURL('image/jpeg', 1.0);
  var newblob = dataURItoBlob(data);
  ws.send(newblob);
}, 250);
</script>
</body>
</html>

facedetect.html
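Two notes on the page above. First, the prefixed getUserMedia variants and the video.src = window.URL.createObjectURL(stream) assignment reflect the browser APIs of the time; current browsers expose the Promise-based navigator.mediaDevices.getUserMedia() and expect the stream to be assigned to video.srcObject instead. Second, the 250 ms timer caps the upload at about 4 frames per second, which keeps each 320x240 JPEG comfortably under the 512 KB binary message limit configured on the server below.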

Next, the server-side code (Jetty, WebSocket, JavaCV), which handles:

  1. receiving the video frame data sent by the client;
  2. running face detection with JavaCV;
  3. drawing the detection results onto the original image and sending the new image back to the client.
package com.husthzy.face;

import org.eclipse.jetty.server.Server;

public class WebsocketServer extends Thread {

    @Override
    public void run() {
        super.run();
        try {
            Server server = new Server(2014);
            server.setHandler(new FaceDetectionHandler());
            server.setStopTimeout(0);
            server.start();
            server.join();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        WebsocketServer mWebSocketServer = new WebsocketServer();
        mWebSocketServer.start();
    }
}

WebsocketServer.java
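Two details worth noting: the port passed to new Server(2014) must match the ws://127.0.0.1:2014/ URL in the HTML, and because configure() registers the handler class (rather than an instance) with the WebSocketServletFactory, Jetty creates a fresh FaceDetectionHandler for each connection, which is why the per-instance mSession field below works.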

package com.husthzy.face;

import static org.bytedeco.javacpp.opencv_core.CV_8UC1;
import static org.bytedeco.javacpp.opencv_imgcodecs.cvDecodeImage;
import static org.bytedeco.javacpp.opencv_imgproc.COLOR_BGRA2GRAY;
import static org.bytedeco.javacpp.opencv_imgproc.cvtColor;
import static org.bytedeco.javacpp.opencv_imgproc.equalizeHist;
import static org.bytedeco.javacpp.opencv_imgproc.rectangle;

import java.awt.image.BufferedImage;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;

import javax.imageio.ImageIO;

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.opencv_core;
import org.bytedeco.javacpp.opencv_core.IplImage;
import org.bytedeco.javacpp.opencv_core.Mat;
import org.bytedeco.javacpp.opencv_core.Rect;
import org.bytedeco.javacpp.opencv_core.RectVector;
import org.bytedeco.javacpp.opencv_core.Scalar;
import org.bytedeco.javacpp.opencv_objdetect.CascadeClassifier;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.Java2DFrameConverter;
import org.bytedeco.javacv.OpenCVFrameConverter.ToMat;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
import org.eclipse.jetty.websocket.server.WebSocketHandler;
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;

@WebSocket
public class FaceDetectionHandler extends WebSocketHandler {

    private static final String CASCADE_FILE = "haarcascade_frontalface_alt.xml";

    private Session mSession;
    private static ArrayList<FaceDetectionHandler> sessions = new ArrayList<FaceDetectionHandler>();
    private CascadeClassifier face_cascade = new CascadeClassifier(CASCADE_FILE);

    public static ArrayList<FaceDetectionHandler> getAllSessions() {
        return sessions;
    }

    @Override
    public void configure(WebSocketServletFactory factory) {
        factory.register(FaceDetectionHandler.class);
        factory.getPolicy().setMaxBinaryMessageSize(1024 * 512);
    }

    @OnWebSocketClose
    public void onClose(int statusCode, String reason) {
        sessions.remove(this);
        System.out.println(
                "Close: statusCode = " + statusCode + ", reason = " + reason + ", sessions = " + sessions.size());
    }

    @OnWebSocketError
    public void onError(Throwable t) {
        System.out.println("Error: " + t.getMessage());
    }

    @OnWebSocketConnect
    public void onConnect(Session session) {
        mSession = session;
        sessions.add(this);
        System.out.println("Connect: " + session.getRemoteAddress().getAddress());
    }

    @OnWebSocketMessage
    public void onMessage(String message) {
        System.out.println("Message: " + message);
    }

    @OnWebSocketMessage
    public void onBinaryMessage(byte data[], int offset, int length) {
        System.out.println("Binary Message len:" + length);
        // Ignore very small payloads; a crude guard against incomplete frames
        if (length > 10000) {
            try {
                byte[] sdata = process(data);
                ByteBuffer byteBuffer = ByteBuffer.wrap(sdata);
                mSession.getRemote().sendBytes(byteBuffer);
                byteBuffer.clear();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public byte[] process(byte data[]) {
        // Decode the received JPEG bytes into an image
        IplImage originalImage = cvDecodeImage(opencv_core.cvMat(1, data.length, CV_8UC1, new BytePointer(data)));
        Mat videoMat = new Mat(originalImage);
        Mat videoMatGray = new Mat();

        // Convert the current frame to grayscale and equalize it:
        cvtColor(videoMat, videoMatGray, COLOR_BGRA2GRAY);
        equalizeHist(videoMatGray, videoMatGray);

        RectVector faces = new RectVector();
        face_cascade.detectMultiScale(videoMatGray, faces);
        for (int i = 0; i < faces.size(); i++) {
            Rect face_i = faces.get(i);

            // Hooks for recognition (not used here):
            // Mat face = new Mat(videoMatGray, face_i);
            // If a Fisherfaces recognizer were used, the face would need to be resized first:
            // resize(face, face_resized, new Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
            // int prediction = lbphFaceRecognizer.predict(face);

            // Draw a green rectangle around the detected face on the original image:
            rectangle(videoMat, face_i, new Scalar(0, 255, 0, 1));
            System.out.println("face pos: x:" + face_i.x() + " y:" + face_i.y());

            // Annotating the box with the prediction would look like this
            // (clamped so the text position stays inside the image):
            // String box_text = "Prediction = " + prediction;
            // int pos_x = Math.max(face_i.tl().x() - 10, 0);
            // int pos_y = Math.max(face_i.tl().y() - 10, 0);
            // putText(videoMat, box_text, new Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
        }
        // JavaCVUtil.imShow(videoMat, "test");
        return getMatByteBuffer(videoMat);
    }

    private byte[] getMatByteBuffer(Mat m) {
        byte[] result = null;
        try {
            // Mat -> Frame -> BufferedImage -> PNG bytes
            ToMat convert = new ToMat();
            Frame frame = convert.convert(m);
            Java2DFrameConverter java2dFrameConverter = new Java2DFrameConverter();
            BufferedImage bufferedImage = java2dFrameConverter.convert(frame);
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            ImageIO.write(bufferedImage, "png", out);
            result = out.toByteArray();
            out.close();
        } catch (IOException exception) {
            exception.printStackTrace();
        }
        return result;
    }
}

FaceDetectionHandler.java
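The process() method above decodes frames through the legacy cvDecodeImage/IplImage path and calls detectMultiScale with default parameters. As a minimal sketch (the class name DetectSketch and all parameter values are my own illustrative choices, not part of the original project), the same decode-and-detect steps with the newer C++-style JavaCV API and the tuning parameters spelled out would look roughly like this:

package com.husthzy.face;

import static org.bytedeco.javacpp.opencv_core.CV_8UC1;
import static org.bytedeco.javacpp.opencv_imgcodecs.IMREAD_COLOR;
import static org.bytedeco.javacpp.opencv_imgcodecs.imdecode;
import static org.bytedeco.javacpp.opencv_imgproc.COLOR_BGR2GRAY;
import static org.bytedeco.javacpp.opencv_imgproc.cvtColor;
import static org.bytedeco.javacpp.opencv_imgproc.equalizeHist;

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.opencv_core.Mat;
import org.bytedeco.javacpp.opencv_core.RectVector;
import org.bytedeco.javacpp.opencv_core.Size;
import org.bytedeco.javacpp.opencv_objdetect.CascadeClassifier;

public class DetectSketch {

    // Decode JPEG bytes with imdecode instead of the legacy cvDecodeImage
    static Mat decodeFrame(byte[] data) {
        Mat raw = new Mat(1, data.length, CV_8UC1, new BytePointer(data));
        return imdecode(raw, IMREAD_COLOR); // 3-channel BGR Mat
    }

    // detectMultiScale with its tuning parameters made explicit
    static RectVector detect(CascadeClassifier cascade, Mat bgr) {
        Mat gray = new Mat();
        cvtColor(bgr, gray, COLOR_BGR2GRAY);
        equalizeHist(gray, gray);

        RectVector faces = new RectVector();
        cascade.detectMultiScale(gray, faces,
                1.1,                 // scaleFactor: pyramid step; smaller is slower but more thorough
                3,                   // minNeighbors: higher suppresses more false positives
                0,                   // flags: ignored by new-style cascades
                new Size(30, 30),    // minSize: ignore detections smaller than this
                new Size(320, 240)); // maxSize: here, the full frame
        return faces;
    }
}

If the cascade reports spurious faces on webcam frames, raising minNeighbors or minSize is usually the first lever to try.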

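To try it out: place haarcascade_frontalface_alt.xml in the server's working directory, run WebsocketServer.main(), then open facedetect.html in a browser and grant camera access; detected faces come back outlined in green.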