Pattern Recognition Project: Gesture Recognition Based on OpenCV

I built a small gesture-recognition program using the Windows build of OpenCV 2.4.4, Qt 4.8.3, and the VS2010 compiler.

The program mainly relies on OpenCV's classifier-training facilities plus basic image-processing techniques, including skin-color detection.

Without further ado, here is the basic interface design and the main functionality:

[Screenshot: main interface of the gesture-recognition program]

Anyone with a bit of Qt experience will find this interface design familiar. Let's move on.

Next, the OpenCV 2.4.4 libraries are imported into the Qt project. Here is the Qt project file:

    #-------------------------------------------------
    #
    # Project created by QtCreator 2013-05-25T11:16:11
    #
    #-------------------------------------------------
    QT       += core gui
    CONFIG   += warn_off
    greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

    TARGET   = HandGesture
    TEMPLATE = app

    INCLUDEPATH += E:/MyQtCreator/MyOpenCV/opencv/build/include

    SOURCES += main.cpp\
        handgesturedialog.cpp \
        SRC/GestrueInfo.cpp \
        SRC/AIGesture.cpp

    HEADERS  += handgesturedialog.h \
        SRC/GestureStruct.h \
        SRC/GestrueInfo.h \
        SRC/AIGesture.h

    FORMS    += handgesturedialog.ui

    # Load OpenCV runtime libs
    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_core244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_core244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_features2d244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_features2d244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_haartraining_engine
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_haartraining_engined
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_highgui244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_highgui244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_objdetect244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_objdetect244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_video244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_video244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_calib3d244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_calib3d244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_contrib244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_contrib244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_imgproc244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_imgproc244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_legacy244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_legacy244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_ml244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_ml244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_photo244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_photo244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10

    win32:CONFIG(release, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_nonfree244
    else:win32:CONFIG(debug, debug|release): LIBS += -L$$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10/lib/ -lopencv_nonfree244d
    INCLUDEPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
    DEPENDPATH += $$PWD/../../../MyQtCreator/MyOpenCV/opencv/build/x86/vc10
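
If the project file is set up correctly, a quick sanity check is to print the OpenCV version at runtime. This is just a minimal sketch of my own (not part of the original project), assuming the OpenCV 2.4.x header layout referenced above:

    #include <opencv2/core/version.hpp>  // defines CV_VERSION
    #include <iostream>

    int main()
    {
        // Should print "2.4.4" if the include path and libraries from the
        // .pro file above are being picked up correctly.
        std::cout << "OpenCV version: " << CV_VERSION << std::endl;
        return 0;
    }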

With this basic configuration in place, we can move on to the gesture-recognition development itself.

Step 1: collect the raw images.

[Screenshots: raw gesture sample images]

After collecting the raw images I cleaned them up, mainly normalizing their size; at the time I used MATLAB for that step, though the same normalization can be done directly with OpenCV, as sketched below.
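
The original post does not show the MATLAB step, so the following is only an illustrative alternative using the OpenCV C API; the file paths and the 320x240 target size are made up for the example:

    // Minimal sketch of batch-normalizing one sample image (alternative to the MATLAB step).
    #include <opencv2/core/core_c.h>
    #include <opencv2/highgui/highgui_c.h>
    #include <opencv2/imgproc/imgproc_c.h>
    #include <cstdio>

    void NormalizeSample(const char* srcPath, const char* dstPath)
    {
        IplImage* src = cvLoadImage(srcPath, 1);
        if (!src)
        {
            printf("cannot load %s\n", srcPath);
            return;
        }
        // Resize every sample to a fixed size so the training set is uniform.
        IplImage* dst = cvCreateImage(cvSize(320, 240), src->depth, src->nChannels);
        cvResize(src, dst, CV_INTER_LINEAR);
        cvSaveImage(dstPath, dst);
        cvReleaseImage(&src);
        cvReleaseImage(&dst);
    }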

Next comes extracting the sample features and training. There are plenty of step-by-step tutorials on CSDN about building and training image feature/classifier libraries; following them carefully should cause no problems.

The next step is capturing images from the camera. Here is the code:

    void HandGestureDialog::on_pushButton_OpenCamera_clicked()
    {
        cam = cvCreateCameraCapture(0);
        timer->start(time_intervals);
        frame = cvQueryFrame(cam);
        ui->pushButton_OpenCamera->setDisabled(true);
        ui->pushButton_CloseCamera->setEnabled(true);
        ui->pushButton_ShowPause->setEnabled(true);
        ui->pushButton_SnapImage->setEnabled(true);
        afterSkin = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
    }

    void HandGestureDialog::readFarme()
    {
        frame = cvQueryFrame(cam);
        QImage image((const uchar*)frame->imageData,
                     frame->width,
                     frame->height,
                     QImage::Format_RGB888);
        image = image.rgbSwapped();
        image = image.scaled(320, 240);
        ui->label_CameraShow->setPixmap(QPixmap::fromImage(image));
        gesture.SkinDetect(frame, afterSkin);

        if (status_switch == Recongnise)
        {
            // Flip the frame into a mirror image
            cvFlip(frame, frame, 1);
            // Detect and draw the hand positions
            StartRecongizeHand(frame);
        }
    }
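
readFarme() is driven by the timer started in on_pushButton_OpenCamera_clicked(). The dialog constructor is not shown in the post, but the wiring is presumably the usual Qt timer/slot pattern, roughly:

    // Hypothetical excerpt from the dialog constructor (not shown in the post);
    // requires #include <QTimer> and readFarme() declared as a slot.
    timer = new QTimer(this);
    connect(timer, SIGNAL(timeout()), this, SLOT(readFarme()));
    // time_intervals would be the capture period in milliseconds, e.g. 30 ms.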

Here are some sample images:

[Screenshot: captured sample images]

Core code for starting the training:

    void HandGestureDialog::on_pushButton_StartTrain_clicked()
    {
        QProgressDialog* process = new QProgressDialog(this);
        process->setWindowTitle("Training Model");
        process->setLabelText("Processing...");
        process->setModal(true);
        process->show();
        gesture.setMainUIPointer(this);
        gesture.Train(process);
        QMessageBox::about(this, tr("Done"), tr("Gesture training model finished"));
    }

    // Train every gesture found in the training folder
    void CAIGesture::Train(QProgressDialog* pBar)
    {
        QString curStr = QDir::currentPath();
        QString fp1 = "InfoDoc/gestureFeatureFile.yml";
        fp1 = curStr + "/" + fp1;
        CvFileStorage* GestureFeature = cvOpenFileStorage(fp1.toStdString().c_str(), 0, CV_STORAGE_WRITE);

        FILE* fp;
        QString fp2 = "InfoDoc/gestureFile.txt";
        fp2 = curStr + "/" + fp2;
        fp = fopen(fp2.toStdString().c_str(), "w");

        int FolderCount = 0;

        /* get the current directory, then enumerate its sub-directories */
        QString trainStr = curStr;
        trainStr += "/TraningSample/";
        QDir trainDir(trainStr);
        GestureStruct gesture;
        QFileInfoList list = trainDir.entryInfoList();
        pBar->setRange(0, list.size() - 2);
        for (int i = 2; i < list.size(); i++)   // entries 0 and 1 are "." and ".."
        {
            pBar->setValue(i - 1);
            QFileInfo fileInfo = list.at(i);
            if (fileInfo.isDir() == true)
            {
                FolderCount++;
                QString tempStr = fileInfo.fileName();
                fprintf(fp, "%s\n", tempStr.toStdString().c_str());
                gesture.angleName = tempStr.toStdString() + "angleName";
                gesture.anglechaName = tempStr.toStdString() + "anglechaName";
                gesture.countName = tempStr.toStdString() + "anglecountName";
                tempStr = trainStr + tempStr + "/";
                QDir subDir(tempStr);
                OneGestureTrain(subDir, GestureFeature, gesture);
            }
        }
        pBar->autoClose();
        delete pBar;
        pBar = NULL;

        fprintf(fp, "%s%d", "Hand Gesture Number: ", FolderCount);
        fclose(fp);
        cvReleaseFileStorage(&GestureFeature);
    }

    // Train a single gesture: average the features over all sample images in its folder
    void CAIGesture::OneGestureTrain(QDir GestureDir, CvFileStorage* fs, GestureStruct gesture)
    {
        IplImage* TrainImage = 0;
        IplImage* dst = 0;
        CvSeq* contour = NULL;
        CvMemStorage* storage;
        storage = cvCreateMemStorage(0);
        CvPoint center = cvPoint(0, 0);
        float radius = 0.0;
        float angle[FeatureNum][10] = {0}, anglecha[FeatureNum][10] = {0},
              anglesum[FeatureNum][10] = {0}, anglechasum[FeatureNum][10] = {0};
        float count[FeatureNum] = {0}, countsum[FeatureNum] = {0};
        int FileCount = 0;

        /* read every image file in this directory */
        QFileInfoList list = GestureDir.entryInfoList();
        QString currentDirPath = GestureDir.absolutePath();
        currentDirPath += "/";
        for (int k = 2; k < list.size(); k++)   // skip "." and ".."
        {
            QFileInfo tempInfo = list.at(k);
            if (tempInfo.isFile() == true)
            {
                QString fileNamePath = currentDirPath + tempInfo.fileName();
                TrainImage = cvLoadImage(fileNamePath.toStdString().c_str(), 1);
                if (TrainImage == NULL)
                {
                    cout << "can't load image" << endl;
                    cvReleaseMemStorage(&storage);
                    cvReleaseImage(&dst);
                    cvReleaseImage(&TrainImage);
                    return;
                }
                if (dst == NULL && TrainImage != NULL)
                    dst = cvCreateImage(cvGetSize(TrainImage), 8, 1);
                SkinDetect(TrainImage, dst);
                FindBigContour(dst, contour, storage);
                cvZero(dst);
                cvDrawContours(dst, contour, CV_RGB(255,255,255), CV_RGB(255,255,255), -1, -1, 8);
                ComputeCenter(contour, center, radius);
                GetFeature(dst, center, radius, angle, anglecha, count);
                for (int j = 0; j < FeatureNum; j++)
                {
                    countsum[j] += count[j];
                    for (int m = 0; m < 10; m++)   // renamed from k to avoid shadowing the file index
                    {
                        anglesum[j][m] += angle[j][m];
                        anglechasum[j][m] += anglecha[j][m];
                    }
                }
                FileCount++;
                cvReleaseImage(&TrainImage);
            }
        }

        // average the features over all training images of this gesture
        for (int i = 0; i < FeatureNum; i++)
        {
            gesture.count[i] = countsum[i] / FileCount;
            for (int j = 0; j < 10; j++)
            {
                gesture.angle[i][j] = anglesum[i][j] / FileCount;
                gesture.anglecha[i][j] = anglechasum[i][j] / FileCount;
            }
        }

        cvStartWriteStruct(fs, gesture.angleName.c_str(), CV_NODE_SEQ, NULL);   // start writing the yml file
        int i = 0;
        for (i = 0; i < FeatureNum; i++)
            cvWriteRawData(fs, &gesture.angle[i][0], 10, "f");      // skin-colored angle spans
        cvEndWriteStruct(fs);
        cvStartWriteStruct(fs, gesture.anglechaName.c_str(), CV_NODE_SEQ, NULL);
        for (i = 0; i < FeatureNum; i++)
            cvWriteRawData(fs, &gesture.anglecha[i][0], 10, "f");   // non-skin angle spans
        cvEndWriteStruct(fs);
        cvStartWriteStruct(fs, gesture.countName.c_str(), CV_NODE_SEQ, NULL);
        cvWriteRawData(fs, &gesture.count[0], FeatureNum, "f");     // number of skin spans per circle
        cvEndWriteStruct(fs);
        cvReleaseMemStorage(&storage);
        cvReleaseImage(&dst);
    }

    void CAIGesture::SkinDetect(IplImage* src, IplImage* dst)
    {
        IplImage* hsv   = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);   // HSV copy of the source
        IplImage* tmpH1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);   // temporaries for the skin masks
        IplImage* tmpS1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* tmpH2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* tmpS3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* tmpH3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* tmpS2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* H = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* S = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* V = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
        IplImage* src_tmp1 = cvCreateImage(cvGetSize(src), 8, 3);

        cvSmooth(src, src_tmp1, CV_GAUSSIAN, 3, 3);   // Gaussian blur
        cvCvtColor(src_tmp1, hsv, CV_BGR2HSV);        // convert to the HSV color space
        cvCvtPixToPlane(hsv, H, S, V, 0);             // split into the 3 channels

        /********************* skin detection **************/
        cvInRangeS(H, cvScalar(0.0, 0.0, 0, 0), cvScalar(20.0, 0.0, 0, 0), tmpH1);
        cvInRangeS(S, cvScalar(75.0, 0.0, 0, 0), cvScalar(200.0, 0.0, 0, 0), tmpS1);
        cvAnd(tmpH1, tmpS1, tmpH1, 0);

        // Red hue with low saturation:
        // hue 0 to 26 degrees and saturation 20 to 90
        cvInRangeS(H, cvScalar(0.0, 0.0, 0, 0), cvScalar(13.0, 0.0, 0, 0), tmpH2);
        cvInRangeS(S, cvScalar(20.0, 0.0, 0, 0), cvScalar(90.0, 0.0, 0, 0), tmpS2);
        cvAnd(tmpH2, tmpS2, tmpH2, 0);

        // Red-to-pink hue with low saturation:
        // hue 340 to 360 degrees and saturation 15 to 90
        cvInRangeS(H, cvScalar(170.0, 0.0, 0, 0), cvScalar(180.0, 0.0, 0, 0), tmpH3);
        cvInRangeS(S, cvScalar(15.0, 0.0, 0, 0), cvScalar(90.0, 0.0, 0, 0), tmpS3);
        cvAnd(tmpH3, tmpS3, tmpH3, 0);

        // Combine the hue and saturation detections
        cvOr(tmpH3, tmpH2, tmpH2, 0);
        cvOr(tmpH1, tmpH2, tmpH1, 0);
        cvCopy(tmpH1, dst);

        cvReleaseImage(&hsv);
        cvReleaseImage(&tmpH1);
        cvReleaseImage(&tmpS1);
        cvReleaseImage(&tmpH2);
        cvReleaseImage(&tmpS2);
        cvReleaseImage(&tmpH3);
        cvReleaseImage(&tmpS3);
        cvReleaseImage(&H);
        cvReleaseImage(&S);
        cvReleaseImage(&V);
        cvReleaseImage(&src_tmp1);
    }

    // Find the biggest contour (by bounding-box area)
    void CAIGesture::FindBigContour(IplImage* src, CvSeq* (&contour), CvMemStorage* storage)
    {
        CvSeq* contour_tmp, *contourPos;
        int contourcount = cvFindContours(src, storage, &contour_tmp, sizeof(CvContour),
                                          CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
        if (contourcount == 0)
            return;
        CvRect bndRect = cvRect(0, 0, 0, 0);
        double contourArea, maxcontArea = 0;
        for (; contour_tmp != 0; contour_tmp = contour_tmp->h_next)
        {
            bndRect = cvBoundingRect(contour_tmp, 0);
            contourArea = bndRect.width * bndRect.height;
            if (contourArea >= maxcontArea)   // keep the biggest contour found so far
            {
                maxcontArea = contourArea;
                contourPos = contour_tmp;
            }
        }
        contour = contourPos;
    }

    // Calculate the palm center and radius from the contour
    void CAIGesture::ComputeCenter(CvSeq* (&contour), CvPoint& center, float& radius)
    {
        // center = centroid of the contour, computed from its spatial moments
        CvMoments m;
        double M00, X, Y;
        cvMoments(contour, &m, 0);
        M00 = cvGetSpatialMoment(&m, 0, 0);
        X = cvGetSpatialMoment(&m, 1, 0) / M00;
        Y = cvGetSpatialMoment(&m, 0, 1) / M00;
        center.x = (int)X;
        center.y = (int)Y;

        /******************* find the radius **********************/
        int hullcount;
        CvSeq* hull;
        CvPoint pt;
        double tmpr1, r = 0;
        hull = cvConvexHull2(contour, 0, CV_COUNTER_CLOCKWISE, 0);
        hullcount = hull->total;
        for (int i = 1; i < hullcount; i++)
        {
            pt = **CV_GET_SEQ_ELEM(CvPoint*, hull, i);   // get each hull point
            tmpr1 = sqrt((double)((center.x - pt.x) * (center.x - pt.x)) +
                         (double)((center.y - pt.y) * (center.y - pt.y)));   // distance from the center
            if (tmpr1 > r)   // keep the maximum distance as the radius
                r = tmpr1;
        }
        radius = r;
    }

    void CAIGesture::GetFeature(IplImage* src, CvPoint& center, float radius,
                                float angle[FeatureNum][10],
                                float anglecha[FeatureNum][10],
                                float count[FeatureNum])
    {
        int width = src->width;
        int height = src->height;
        int step = src->widthStep / sizeof(uchar);
        uchar* data = (uchar*)src->imageData;
        float R = 0.0;
        int a1, b1, x1, y1, a2, b2, x2, y2;   // offsets of the sampled points relative to the center
        float angle1_tmp[200] = {0}, angle2_tmp[200] = {0}, angle1[50] = {0}, angle2[50] = {0};   // temporaries for the angles
        int angle1_tmp_count = 0, angle2_tmp_count = 0, angle1count = 0, angle2count = 0, anglecount = 0;

        for (int i = 0; i < FeatureNum; i++)   // extract features on FeatureNum (here 5) concentric circles
        {
            R = (i + 4) * radius / 9;
            for (int j = 0; j <= 3600; j++)   // walk around the circle in 0.1-degree steps
            {
                // Compute two neighbouring points (x1,y1) and (x2,y2) on the circle of radius R.
                // Each quadrant is handled separately; sketching the geometry on paper makes this clear.
                if (j <= 900)
                {
                    a1 = (int)(R * sin(j * 3.14 / 1800));
                    b1 = (int)(R * cos(j * 3.14 / 1800));
                    x1 = center.x - b1;
                    y1 = center.y - a1;
                    a2 = (int)(R * sin((j + 1) * 3.14 / 1800));
                    b2 = (int)(R * cos((j + 1) * 3.14 / 1800));
                    x2 = center.x - b2;
                    y2 = center.y - a2;
                }
                else
                {
                    if (j > 900 && j <= 1800)
                    {
                        a1 = (int)(R * sin((j - 900) * 3.14 / 1800));
                        b1 = (int)(R * cos((j - 900) * 3.14 / 1800));
                        x1 = center.x + a1;
                        y1 = center.y - b1;
                        a2 = (int)(R * sin((j + 1 - 900) * 3.14 / 1800));
                        b2 = (int)(R * cos((j + 1 - 900) * 3.14 / 1800));
                        x2 = center.x + a2;
                        y2 = center.y - b2;
                    }
                    else
                    {
                        if (j > 1800 && j < 2700)
                        {
                            a1 = (int)(R * sin((j - 1800) * 3.14 / 1800));
                            b1 = (int)(R * cos((j - 1800) * 3.14 / 1800));
                            x1 = center.x + b1;
                            y1 = center.y + a1;
                            a2 = (int)(R * sin((j + 1 - 1800) * 3.14 / 1800));
                            b2 = (int)(R * cos((j + 1 - 1800) * 3.14 / 1800));
                            x2 = center.x + b2;
                            y2 = center.y + a2;
                        }
                        else
                        {
                            a1 = (int)(R * sin((j - 2700) * 3.14 / 1800));
                            b1 = (int)(R * cos((j - 2700) * 3.14 / 1800));
                            x1 = center.x - a1;
                            y1 = center.y + b1;
                            a2 = (int)(R * sin((j + 1 - 2700) * 3.14 / 1800));
                            b2 = (int)(R * cos((j + 1 - 2700) * 3.14 / 1800));
                            x2 = center.x - a2;
                            y2 = center.y + b2;
                        }
                    }
                }
                if (x1 > 0 && x1 < width && x2 > 0 && x2 < width && y1 > 0 && y1 < height && y2 > 0 && y2 < height)
                {
                    if ((int)data[y1 * step + x1] == 255 && (int)data[y2 * step + x2] == 0)
                    {
                        angle1_tmp[angle1_tmp_count] = (float)(j * 0.1);   // angle where skin changes to non-skin
                        angle1_tmp_count++;
                    }
                    else if ((int)data[y1 * step + x1] == 0 && (int)data[y2 * step + x2] == 255)
                    {
                        angle2_tmp[angle2_tmp_count] = (float)(j * 0.1);   // angle where non-skin changes to skin
                        angle2_tmp_count++;
                    }
                }
            }

            int j = 0;
            for (j = 0; j < angle1_tmp_count; j++)
            {
                if (j > 0 && angle1_tmp[j] - angle1_tmp[j - 1] < 0.2)   // ignore transitions that are too close together
                    continue;                                           // (the j > 0 guard avoids reading index -1)
                angle1[angle1count] = angle1_tmp[j];
                angle1count++;
            }
            for (j = 0; j < angle2_tmp_count; j++)
            {
                if (j > 0 && angle2_tmp[j] - angle2_tmp[j - 1] < 0.2)
                    continue;
                angle2[angle2count] = angle2_tmp[j];
                angle2count++;
            }

            for (j = 0; j < max(angle1count, angle2count); j++)
            {
                if (angle1[0] > angle2[0])
                {
                    if (angle1[j] - angle2[j] < 7)   // ignore spans under 7 degrees; a finger is normally wider than that
                        continue;
                    angle[i][anglecount] = (float)((angle1[j] - angle2[j]) * 0.01);        // skin span (a finger)
                    anglecha[i][anglecount] = (float)((angle2[j + 1] - angle1[j]) * 0.01); // non-skin span, e.g. the gap between fingers
                    anglecount++;
                }
                else
                {
                    if (angle1[j + 1] - angle2[j] < 7)
                        continue;
                    anglecount++;
                    angle[i][anglecount] = (float)((angle1[j + 1] - angle2[j]) * 0.01);
                    anglecha[i][anglecount] = (float)((angle2[j] - angle1[j]) * 0.01);
                }
            }
            if (angle1[0] < angle2[0])
                angle[i][0] = (float)((angle1[0] + 360 - angle2[angle2count - 1]) * 0.01);
            else
                anglecha[i][0] = (float)((angle2[0] + 360 - angle1[angle1count - 1]) * 0.01);
            count[i] = (float)anglecount;

            // reset the temporaries before processing the next circle
            angle1_tmp_count = 0, angle2_tmp_count = 0, angle1count = 0, angle2count = 0, anglecount = 0;
            for (j = 0; j < 200; j++)
            {
                angle1_tmp[j] = 0;
                angle2_tmp[j] = 0;
            }
            for (j = 0; j < 50; j++)
            {
                angle1[j] = 0;
                angle2[j] = 0;
            }
        }
    }


That covers the core code I wrote for building the training library: the feature-extraction routine, plus the basic skin-detection and largest-contour (connected-region) functions.
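
The post does not show the routine that reads these trained features back for matching, but loading them from gestureFeatureFile.yml would look roughly like the sketch below. Only the node-name convention ("<gesture>angleName", "<gesture>anglechaName", "<gesture>anglecountName") comes from Train() above; everything else is my own assumption.

    // Hypothetical sketch: load one gesture's trained features from the yml file
    // written by CAIGesture::Train().
    #include <opencv2/core/core_c.h>
    #include <string>

    static const int FeatureNum = 5;   // assumed to match the 5 concentric circles used in GetFeature()

    bool LoadGestureFeature(const char* ymlPath, const std::string& gestureName,
                            float angle[FeatureNum][10], float anglecha[FeatureNum][10],
                            float count[FeatureNum])
    {
        CvFileStorage* fs = cvOpenFileStorage(ymlPath, 0, CV_STORAGE_READ);
        if (!fs)
            return false;
        CvFileNode* angleNode    = cvGetFileNodeByName(fs, 0, (gestureName + "angleName").c_str());
        CvFileNode* anglechaNode = cvGetFileNodeByName(fs, 0, (gestureName + "anglechaName").c_str());
        CvFileNode* countNode    = cvGetFileNodeByName(fs, 0, (gestureName + "anglecountName").c_str());
        if (!angleNode || !anglechaNode || !countNode)
        {
            cvReleaseFileStorage(&fs);
            return false;
        }
        // Each sequence was written with cvWriteRawData, so read it back the same way.
        cvReadRawData(fs, angleNode, &angle[0][0], "f");
        cvReadRawData(fs, anglechaNode, &anglecha[0][0], "f");
        cvReadRawData(fs, countNode, &count[0], "f");
        cvReleaseFileStorage(&fs);
        return true;
    }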

Now let's look at the code for recognizing a specific hand gesture:

    void HandGestureDialog::on_pushButton_StartRecongnise_clicked()
    {
        if (cam == NULL)
        {
            QMessageBox::warning(this, tr("Warning"), tr("Please Check Camera !"));
            return;
        }
        status_switch = Nothing;
        status_switch = Recongnise;
    }
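
The status_switch variable and the Nothing/Recongnise values are never shown in the post; presumably they are declared in the dialog header as something like:

    // Hypothetical declaration (not shown in the post), e.g. in handgesturedialog.h:
    enum StatusSwitch
    {
        Nothing,       // idle: just show the camera feed
        Recongnise     // run StartRecongizeHand() on every frame
    };
    StatusSwitch status_switch;
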
    void HandGestureDialog::StartRecongizeHand(IplImage* img)
    {
        // Name of the cascade file that contains the trained classifier for detecting a hand
        const char* cascade_name = "hand.xml";
        // Memory for the detector's calculations
        static CvMemStorage* storage = 0;
        // The Haar classifier cascade
        static CvHaarClassifierCascade* cascade = 0;
        // Scale with which the rectangle is drawn
        int scale = 1;
        // Two points representing the detected hand location
        CvPoint pt1, pt2;
        // Looping variable
        int i;

        // Load the HaarClassifierCascade
        cascade = (CvHaarClassifierCascade*)cvLoad(cascade_name, 0, 0, 0);
        // Check whether the cascade has loaded successfully. Else report an error and quit
        if (!cascade)
        {
            fprintf(stderr, "ERROR: Could not load classifier cascade\n");
            return;
        }
        // Allocate the memory storage
        storage = cvCreateMemStorage(0);
        // Create a new named window with title: result
        cvNamedWindow("result", 1);
        // Clear the memory storage which was used before
        cvClearMemStorage(storage);

        // If the cascade is loaded, look for hands in the image
        if (cascade)
        {
            // There can be more than one hand in an image, so detect the objects
            // and store them in a growable sequence.
            CvSeq* hands = cvHaarDetectObjects(img, cascade, storage,
                                               1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                               cvSize(40, 40));
            // Loop over the hands found
            for (i = 0; i < (hands ? hands->total : 0); i++)
            {
                // Get the rectangle for this hand
                CvRect* r = (CvRect*)cvGetSeqElem(hands, i);
                // Find the dimensions of the hand and scale them if necessary
                pt1.x = r->x * scale;
                pt2.x = (r->x + r->width) * scale;
                pt1.y = r->y * scale;
                pt2.y = (r->y + r->height) * scale;
                // Draw the rectangle in the input image
                cvRectangle(img, pt1, pt2, CV_RGB(230,20,232), 3, 8, 0);
            }
        }
        // Show the image in the window named "result"
        cvShowImage("result", img);
        cvWaitKey(30);
    }
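
As written, StartRecongizeHand() reloads hand.xml and allocates a new CvMemStorage on every frame, which is slow and leaks the previous storage. A minimal sketch of loading them only once, reusing the existing static pointers (my own variant, not from the original post):

    // Hypothetical variant of the top of StartRecongizeHand():
    // load the cascade and allocate the storage on the first call only.
    static CvMemStorage* storage = 0;
    static CvHaarClassifierCascade* cascade = 0;

    if (!cascade)
    {
        cascade = (CvHaarClassifierCascade*)cvLoad("hand.xml", 0, 0, 0);
        if (!cascade)
        {
            fprintf(stderr, "ERROR: Could not load classifier cascade\n");
            return;
        }
    }
    if (!storage)
        storage = cvCreateMemStorage(0);

    cvClearMemStorage(storage);   // reuse the same storage for each frame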

Note that this cascade file works best for a half-closed palm gesture:

[Screenshot: hand detection result]

Thank you for reading this far. My implementation is still rough, and there are parts I have not fully figured out myself, so comments and corrections from more experienced readers are very welcome!

I have uploaded the source code as a downloadable resource so that everyone can study and modify it:

http://download.csdn.net/detail/liuguiyangnwpu/7467891

http://blog.csdn.net/berguiliu/article/details/9307495
