Got it working! :) Here is the code to detect faces and facial landmarks in a video stream using the LBF Facemark. There were a few modifications I had to make to the tutorial code to get it working (I marked them in the code; I'm not sure whether these were my errors or the tutorial's).

To use the code you'll need OpenCV 3.4.0 with the contrib modules, the lbpcascade_frontalface.xml from here, and the lbfmodel.yaml from here.

Cheers! Thank you for adding facial landmarks to the library! :)
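
(A quick aside: before running the full program below, it can help to check that your OpenCV build actually includes the contrib face module and that both model files can be loaded. This is just a minimal sanity-check sketch, assuming the two files sit next to the executable; adjust the paths otherwise.)

#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/objdetect.hpp"

#include <iostream>

int main()
{
    //print the OpenCV version this binary was built against
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;

    //the cascade loads only if lbpcascade_frontalface.xml is present and readable
    cv::CascadeClassifier cascade;
    if (!cascade.load("lbpcascade_frontalface.xml"))
        std::cout << "Could not load lbpcascade_frontalface.xml" << std::endl;

    //loading the LBF model fails (or throws) if lbfmodel.yaml is missing or corrupt
    cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yaml");
    std::cout << "Facemark model loaded" << std::endl;

    return 0;
}

And here is the full program: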

#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"

#include <iostream>

//configuration passed to the face detector callback: the cascade classifier, its model path and the detection scale factor
struct Conf
{
    cv::String model_path;
    double scaleFactor;
    Conf(cv::String s, double d)
    {
        model_path = s;
        scaleFactor = d;
        face_detector.load(model_path);
    }

    cv::CascadeClassifier face_detector;
};

//custom face detector callback used by the facemark object:
//converts the frame to grayscale, equalizes it and runs the LBP cascade
bool myDetector(cv::InputArray image, cv::OutputArray faces, Conf *conf)
{
    cv::Mat gray;

    //convert to grayscale if the input has more than one channel
    if (image.channels() > 1)
        cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();

    //equalize the histogram to improve contrast for the cascade
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> faces_;
    //!!!changed face_cascade to face_detector to resolve error!!!
    conf->face_detector.detectMultiScale(gray, faces_, conf->scaleFactor, 2, cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
    cv::Mat(faces_).copyTo(faces);

    return true;
}

int main()
{
   //create instance of lbf facemark model
   cv::face::FacemarkLBF::Params params;
   params.n_landmarks = 68; // number of landmark points
   params.initShape_n = 10; // multiplier used for data augmentation (number of initial shapes per face)
   params.stages_n = 5; // number of refinement stages
   params.tree_n = 6; // number of trees in the model for each landmark point
   params.tree_depth = 5; // depth of each decision tree
   params.model_filename = "lbfmodel.yaml"; // path to the saved lbf facemark model
   cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create(params);

   facemark->loadModel(params.model_filename);

   Conf config("lbpcascade_frontalface.xml", 1.4);
   //!!!add cast here to resolve error!!!
   facemark->setFaceDetector((cv::face::FN_FaceDetector)myDetector, &config); // we must guarantee proper lifetime of "config" object

   //vector to store face detections
   std::vector<cv::Rect> faces;

   //vector to store face landmarks
   std::vector<std::vector<cv::Point2f> > landmarks;

   //open stream to default video device
   cv::VideoCapture stream(0);

   //check if video device has been initialized
   if (!stream.isOpened())
   {
       std::cout << "Cannot open camera" << std::endl;
       return -1;
   }

   while (true) 
   {
       //read a frame at a time from video device
       cv::Mat frame;
       stream.read(frame);

       //stop if no frame could be grabbed
       if (frame.empty())
           break;

       //clear out previous detections
       faces.clear();
       landmarks.clear();

       //detect faces in frame, store detected face rects in face detections vector
       //!!!removed config argument to resolve error!!!
       facemark->getFaces(frame, faces);

       //find face landmarks (only if at least one face was detected)
       if (!faces.empty())
           facemark->fit(frame, faces, landmarks);

       //for each detected face, draw a rectangle around it and overlay its landmarks
       for (size_t i = 0; i < faces.size(); i++)
       {
           cv::rectangle(frame, faces.at(i), cv::Scalar(255, 0, 255));
           if (i < landmarks.size())
               cv::face::drawFacemarks(frame, landmarks.at(i), cv::Scalar(0, 255, 255));
       }

       //display frame
       imshow("Webcam", frame);

       //wait for keypress to close video stream
       if (cv::waitKey(30) >= 0)
           break;
   }

   return 0;
}
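
One more note on the cast at setFaceDetector: FN_FaceDetector expects a function taking a void* user-data pointer, and casting myDetector (which takes a Conf*) is what made the compiler happy here. If you'd rather avoid the function-pointer cast altogether, a variant like the following should also work; it's just a sketch of the same detector written with the signature FN_FaceDetector actually expects (myDetector2 is only an illustrative name):

//same detector as above, but matching the FN_FaceDetector signature directly,
//so it can be registered without a cast
bool myDetector2(cv::InputArray image, cv::OutputArray faces, void *userData)
{
    Conf *conf = (Conf*)userData;

    cv::Mat gray;
    if (image.channels() > 1)
        cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> faces_;
    conf->face_detector.detectMultiScale(gray, faces_, conf->scaleFactor, 2, cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
    cv::Mat(faces_).copyTo(faces);
    return true;
}

//registered without any cast:
//facemark->setFaceDetector(myDetector2, &config);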
