使用SeetaFace和Dlib实现人脸识别

使用SeetaFace和Dlib实现人脸识别

使用SeetaFace 和 Dlib 实现人脸识别

SeetaFace 介绍seetaface由中科院计算所山世光研究员带领的人脸识别研究组研发。代码基于C++实现,不依赖第三方库。开源免费可用。工程包括人脸检测、人脸对齐、人脸识别三个模块。Github 地址是:https://github.com/seetaface/SeetaFaceEngine

Dlib介绍Dlib是一个C++库,包含了许多机器学习算法,也包括人脸特征检测算法。它是跨平台的,可以应用在Windows、Linux、Mac、embedded devices、mobile phones等。它的License是Boost Software License 1.0,可以商用。Dlib的主要特点可以参考官方网站:http://dlib.net/。Dlib人脸特征检测实现了68点标定,拿到68点后就可以用于人脸识别比对和活体检测。

准备开发环境本案例使用的是Microsoft Visual Studio VC++ 2013

安装OpencvOpenCV实现了图像处理和计算机视觉方面的很多通用算法,可以用于人脸识别过程的图像处理。我在测试时使用的是opencv2.4.12。

下载SeetaFace并编译自行下载SeetaFace源码,并编译出三个lib文件和三个dll文件。编译出的结果包括:FaceIdentification.lib、FaceIdentification.dll、FaceAlignment.lib、FaceAlignment.dll、FaceDetection.lib、FaceDetection.dll

下载Dlib源码Dlib 可以在其官网下载http://dlib.net/

新建工程修改配置① 新建win32控制台项目,命名为:FaceIdentificationServer。在工程目录中新建目录“include/seeta”和“include/dlib”。将项目属性改为x64。将seetaFace源码所有”.h”文件拷贝到工程目录下的include/seeta 目录下。将dlib 源代码整个“dlib”文件夹拷贝到“/include/dlib”文件夹下。② 在配置属性“VC++目录”中,修改opencv的包含目录和库目录,opencv使用x64的lib库。修改“库目录”增加OpenCV的lib目录:“\opencv\2.4.12\build\x64\vc12”;修改“包含目录”,增加:“..\include\dlib”、“..\include\seeta”、“D:\greensoftware\opencv\2.4.12\build\include”、“D:\greensoftware\opencv\2.4.12\build\include\opencv”、“D:\greensoftware\opencv\2.4.12\build\include\opencv2”。

③ 在链接器的“输入”中,加入以下几个lib:dlib.lib、opencv_calib3d2412d.lib、opencv_contrib2412d.lib、opencv_core2412d.lib、opencv_features2d2412d.lib、opencv_flann2412d.lib、opencv_gpu2412d.lib、opencv_highgui2412d.lib、opencv_imgproc2412d.lib、opencv_legacy2412d.lib、opencv_ml2412d.lib、opencv_objdetect2412d.lib、opencv_ts2412d.lib、opencv_video2412d.lib、FaceAlignment.lib、FaceDetection.lib、FaceIdentification.lib

④将与seetaFace的6个lib文件对应的dll文件放入可执行文件目录下。⑤将seetaface源码中的三个model以及dlib人脸检测模型文件feets.dat、lbpcascade_frontalface.xml、shape_predictor_68_face_landmarks.dat放到项目执行文件夹的model文件夹下。项目整体结构:x64\Debug\model 用于放模型文件x64\Debug\data 用于放测试照片、人脸特征数据和配置文件

人脸识别实现获取照片特征123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153bool getFeature(cv::Mat src_img, float* feat_pic1){ cv::Mat src_img_gray; if (src_img.channels() != 1) cv::cvtColor(src_img, src_img_gray, cv::COLOR_BGR2GRAY); else src_img_gray = src_img; IplImage src_img_grayscaletmp = src_img_gray; IplImage *src_img_grayscale = cvCloneImage(&src_img_grayscaletmp); if (src_img_grayscale == NULL) { return false; } IplImage src_img_colortmp = src_img; IplImage *src_img_color = cvCloneImage(&src_img_colortmp); int src_im_width = src_img_grayscale->width; int src_im_height = src_img_grayscale->height; unsigned char* src_data = new unsigned char[src_im_width * src_im_height]; unsigned char* src_image_data_ptr = (unsigned char*)src_img_grayscale->imageData; unsigned char* data_ptr; int hh = 0; //获取照片数据1 data_ptr = src_data; for (hh = 0; hh < src_im_height; hh++) { memcpy(data_ptr, src_image_data_ptr, src_im_width); data_ptr += src_im_width; src_image_data_ptr += src_img_grayscale->widthStep; } seeta::ImageData src_image_data; src_image_data.data = src_data; src_image_data.width = src_im_width; src_image_data.height = src_im_height; src_image_data.num_channels = 1; // Detect faces 1 g_lock_detech.lock(); std::vector src_faces = seeta_detector.Detect(src_image_data); g_lock_detech.unlock(); int32_t face_num1 = static_cast(src_faces.size()); if (face_num1 == 0) { delete[]src_data; cvReleaseImage(&src_img_grayscale); cvReleaseImage(&src_img_color); return false; } // Detect 5 facial landmarks seeta::FacialLandmark src_points[5]; for (int k = 0; k < face_num1; k++) { g_lock_detech.lock(); seeta_alignment.PointDetectLandmarks(src_image_data, src_faces[k], src_points); 
g_lock_detech.unlock(); } // Release memory cvReleaseImage(&src_img_color); cvReleaseImage(&src_img_grayscale); delete[]src_data; // ImageData store data of an image without memory alignment. seeta::ImageData src_img_data(src_img.cols, src_img.rows, src_img.channels()); src_img_data.data = src_img.data; /* Extract feature: ExtractFeatureWithCrop */ g_lock_detech.lock(); seeta_recognizer.ExtractFeatureWithCrop(src_img_data, src_points, feat_pic1); g_lock_detech.unlock(); return true;} /*获取特征*/vector getFeatures(vector src_imgs){ int feat_size = seeta_recognizer.feature_size(); vector feat_pics; clock_t start; start = clock(); for (int i = 0; i < src_imgs.size(); i++){ cv::Mat src_img = src_imgs[i]; float* feat_pic1 = new float[feat_size];//n * c * h * w bool suc = getFeature(src_img, feat_pic1); if (suc){ feat_pics.push_back(feat_pic1); } } std::cout << "获取人脸特征:" << (clock() - start) << endl; return feat_pics;}string floatToString(float Num){ ostringstream oss; oss << Num; string str(oss.str()); return str;}string featureToString(float *feature){ int size = seeta_recognizer.feature_size(); string row = ""; for (int i = 0; i < size; i++){ row += floatToString(feature[i]); if (i < size - 1){ row += ","; } } return row;}

1比1人脸照片比对12345678910111213141516171819202122/*人脸比对*/float match(cv::Mat src_img, cv::Mat dist_img){ vector imgs; imgs.push_back(src_img); imgs.push_back(dist_img); vector feats = getFeatures(imgs); float sim = seeta_recognizer.CalcSimilarity(feats[0], feats[1]); for (int i = 0; i < feats.size(); i++){ delete feats[i]; } std::cout << "相似度: " << sim << endl; return sim;}

获取特征点检测活体123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345/*获取特征点*/dlib::full_object_detection getFeaturePoint(cv::Mat temp){ dlib::full_object_detection res; if (temp.rows <= 0 || temp.cols <= 0){ return res; } cv::Mat face_gray; cvtColor(temp, face_gray, CV_BGR2GRAY); //rgb类型转换为灰度类型 equalizeHist(face_gray, face_gray); //直方图均衡化 std::vector cvfaces; int flags = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH; //只检测脸最大的人 g_lock_detech.lock(); face_cascade.detectMultiScale(face_gray, cvfaces, 1.1f, 4, flags, cv::Size(30, 30)); g_lock_detech.unlock(); //|CV_HAAR_FIND_BIGGEST_OBJECT //|CV_HAAR_DO_ROUGH_SEARCH //|CV_HAAR_SCALE_IMAGE dlib::cv_image cimg(temp); if (cvfaces.size() > 0){ dlib::rectangle det; int faceSize = 0; for (int n = 0; n < cvfaces.size(); n++){ int s = cvfaces[n].width * cvfaces[n].height; if (s > faceSize){ faceSize = s; det.set_left(cvfaces[n].x); det.set_top(cvfaces[n].y); det.set_right(cvfaces[n].x + cvfaces[n].width); det.set_bottom(cvfaces[n].y + cvfaces[n].height); } } res = pose_model(cimg, det); } return res;} //DLIB 获取特征点std::vector getFeaturePoints(std::vector images){ 
clock_t start = clock(); std::vector objs; for (int i = 0; i < images.size(); i++){ cv::Mat temp = images[i]; if (temp.rows <= 0 || temp.cols <= 0){ continue; } dlib::full_object_detection obj = getFeaturePoint(temp); if (obj.num_parts()>0){ objs.push_back(obj); } } cout << "获取特征点:" << (clock() - start) << endl; return objs; }/*是否有张合嘴*/bool isMouseLive(std::vector ps){ if (ps.size() == 0)return 0; long min_h = 99999999999999; long max_h = 0; double min = 99999999999999; double max = 0; for (int n = 0; n < ps.size(); n++){ dlib::full_object_detection p = ps[n]; long w = p.part(54).x() - p.part(60).x(); long h = p.part(57).y() - p.part(51).y(); if (hmax_h){ max_h = h; } double bl = h*1.0 / w*1.0; if (bl < min){ min = bl; } if (bl > max){ max = bl; } } if (max <= 0){ return false; } if (min <= 0){ return false; } double p = (max - min) *1.0 / max *1.0; //std::cout << p << endl; if (p > 0.5){ return true; } else{ return false; } }//是否摇头bool isHeadLive(std::vector ps){ if (ps.size() == 0)return 0; bool lefted = false; bool righted = false; double max_right = 0; for (int n = 0; n < ps.size(); n++){ dlib::full_object_detection p = ps[n]; long w = p.part(14).x() - p.part(2).x(); long left = p.part(30).x() - p.part(2).x(); long right = p.part(14).x() - p.part(30).x(); double l = left*1.0 / right; double r = right*1.0 / left; if (l < 0.6 && !lefted){ lefted = true; } if (r < 0.6 && !righted){ righted = true; } if (lefted && righted)break; } if (lefted && righted){ return true; } return false;}//是否眨眼bool isEyeLive(std::vector ps){ //left 37,38,41,42 | 36,39 //right 43,44,46,47 | 42,45 double max_left = 0; double min_left = 9999999; double max_right = 0; double min_right = 9999999; for (int n = 0; n < ps.size(); n++){ dlib::full_object_detection p = ps[n]; long left_w = p.part(39).x() - p.part(36).x(); long right_w = p.part(45).x() - p.part(42).x(); long right_h = p.part(46).y() - p.part(44).y(); long right_h2 = p.part(47).y() - p.part(43).y(); long r = right_h + right_h2; 
double right_d = r*1.0 / right_w; if (right_d > max_right){ max_right = right_d; } if (right_d < min_right){ min_right = right_d; } long left_h = p.part(41).y() - p.part(37).y(); long left_h2 = p.part(40).y() - p.part(38).y(); long l = left_h + left_h2; double left_d = l * 1.0 / left_w; if (left_d > max_left){ max_left = left_d; } if (left_d < min_left){ min_left = left_d; } } double dd_left=(max_left - min_left) / max_left; double dd_right = (max_right - min_right) / max_right; cout << "eye " << dd_left << " " << dd_right << endl; if (dd_left>0.45 || dd_right > 0.45){ return true; } return false;} bool isAlive(string n, std::vector pp, bool mouse, bool head, bool eye, int relayType=1){ if (relayType == 0)return true; std::vector ps; for (int i = 0; i a){ min_area = a; } long x = (p.part(14).x() - p.part(2).x())*1.0 / 2; long y = (p.part(8).y() - p.part(27).y())*1.0 / 2; //中心点距离 long len = sqrt((x - 0)*(x - 0) + (y - 0)*(y - 0)); if (max_lenlen){ min_len = len; } } //如果脸的大小变化比较大,说明有切换图片之类的。 if ( ( (max_area - min_area)*1.0 / max_area) > 0.4){ cout << "size changed" << endl; return false; } //如果中心位置移动太大,就说明有切换图片 if ( ( (max_len - min_len) / sqrt(max_area)) > 0.2){ cout << "pos moved" << endl; return false; } bool m = false; bool h = false; bool e = false; if (mouse){ m = isMouseLive(ps); } if (head){ h = isHeadLive(ps); } if (eye){ e = isEyeLive(ps); } if (relayType == 1){ bool alive = m && h && e; return alive; } else{ bool alive = m || h || e; return alive; } }/*是否活体*/bool isAlive(string id,std::vector images,bool mouse,bool head,bool eye,int relayType){ clock_t start = clock(); std::vector ps = getFeaturePoints(images); bool isa = isAlive(id, ps, mouse, head, eye, relayType); cout << "活体认证:" << isa << " "<< (clock() - start) << endl; return isa;}

声明:本开发案例仅本人用于测试人脸识别库,测试验证在一些对质量要求不是太高的场景可用。

完整代码可联系502782757@qq.com

相关推荐

为什么有的人会觉得自己做的每一个选择都是错的?
《绝地求生刺激战场》黑夜模式进入指南
产假后怎么请病假
beat365亚洲版登录

产假后怎么请病假

📅 02-09 👁️ 479