
Revision history

initial version

asked Dec 2 '16

sarmad

eye landmark points

Hi

I'm using the dlib facial landmark detector to detect eye blinks. How can the eye landmarks be exported to a file?

I need the eye landmarks to calculate the ratio between the height and width of the eye, and then use an SVM to classify blinks.
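
As a reference, here is a minimal sketch of one way to dump the twelve eye landmarks (parts 36-47 in dlib's 68-point model) to a CSV file. It assumes you already have a full_object_detection called shape from the shape predictor; the helper name write_eye_landmarks and the CSV layout are my own choices, not anything from dlib.

#include <dlib/image_processing.h>
#include <fstream>
#include <string>

// Append the x,y coordinates of the 12 eye landmarks (parts 36-47)
// of one detection as a single CSV row.
void write_eye_landmarks(const dlib::full_object_detection& shape,
                         const std::string& path)
{
    std::ofstream out(path, std::ios::app);   // append instead of overwriting
    for (unsigned long k = 36; k <= 47; ++k)
        out << shape.part(k).x() << "," << shape.part(k).y()
            << (k == 47 ? "\n" : ",");
}

Each call appends one row of 24 numbers (x36, y36, ..., x47, y47), which is convenient to load later as an SVM feature vector.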

Thanks

No.2 Revision

eye landmark points

Hi

I'm using the dlib facial landmark detector to detect eye blinks. How can the eye landmarks be exported to a file?

I need the eye landmarks to calculate the ratio between the height and width of the eye, and then use an SVM to classify blinks.

Update: when I try to write the landmark points to a file, the values that get saved differ from the landmarks displayed in the terminal window. How can I fix this?

Thanks

#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>
#include <iostream>
#include <fstream>


using namespace dlib;
using namespace std;

int main(int argc, char** argv)
{
    try
    {

    if (argc == 1)
    {
        cout << "Call this program like this:" << endl;
        cout << "./face_landmark_detection_ex shape_predictor_68_face_landmarks.dat faces/*.jpg" << endl;
        cout << "\nYou can get the shape_predictor_68_face_landmarks.dat file from:\n";
        cout << "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2" << endl;
        return 0;
    }

    frontal_face_detector detector = get_frontal_face_detector();

    shape_predictor sp;
    deserialize(argv[1]) >> sp;


    image_window win, win_faces;
    // Loop over all the images provided on the command line.
    for (int i = 2; i < argc; ++i)
    {
        cout << "processing image " << argv[i] << endl;
        array2d<rgb_pixel> img;
        load_image(img, argv[i]);
        pyramid_up(img);

        std::vector<rectangle> dets = detector(img);
        cout << "Number of faces detected: " << dets.size() << endl;


        std::vector<full_object_detection> shapes;
        for (unsigned long j = 0; j < dets.size(); ++j)
        {
            full_object_detection shape = sp(img, dets[j]);
            cout << "number of parts: " << shape.num_parts() << endl;

            cout << "Eye Landmark points for right eye : " << endl;
            cout << "pixel position of 36 part: " << shape.part(36) << endl;
            cout << "pixel position of 37 part: " << shape.part(37) << endl;
            cout << "pixel position of 38 part: " << shape.part(38) << endl;
            cout << "pixel position of 39 part: " << shape.part(39) << endl;
            cout << "pixel position of 40 part: " << shape.part(40) << endl;
            cout << "pixel position of 41 part: " << shape.part(41) << endl;

            cout << endl;

            cout << "Eye Landmark points for left eye : " << endl;
            cout << "pixel position of 42 part: " << shape.part(42) << endl;
            cout << "pixel position of 43 part: " << shape.part(43) << endl;
            cout << "pixel position of 44 part: " << shape.part(44) << endl;
            cout << "pixel position of 45 part: " << shape.part(45) << endl;
            cout << "pixel position of 46 part: " << shape.part(46) << endl;
            cout << "pixel position of 47 part: " << shape.part(47) << endl;

            shapes.push_back(shape);

            // Write all 68 landmark coordinates of this face to data.txt.
            const full_object_detection& d = shapes[0];
            ofstream outputfile;
            outputfile.open("data.txt");

            for (unsigned long k = 0; k < shape.num_parts(); ++k)
            {
                outputfile << shape.part(k).x() << " " << shape.part(k).y() << endl;
            }

        }
        win.clear_overlay();
        win.set_image(img);
        win.add_overlay(render_face_detections(shapes));

        dlib::array<array2d<rgb_pixel> > face_chips;
        extract_image_chips(img, get_face_chip_details(shapes), face_chips);
        win_faces.set_image(tile_images(face_chips));

        cout << "Hit enter to process the next image..." << endl;
        cin.get();
        }
    }
    catch (exception& e)
    {
        cout << "\nexception thrown!" << endl;
        cout << e.what() << endl;
    }
}
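
One note on the code above, which may explain part of the problem: data.txt is opened inside the per-face loop, so the file is recreated (and its previous contents discarded) for every detected face in every image, and the reference d to shapes[0] is never used. Below is a minimal sketch of the usual pattern, opening the stream once and writing afterwards; it assumes the same shapes vector as above, and the helper name dump_landmarks is mine.

#include <dlib/image_processing.h>
#include <fstream>
#include <string>
#include <vector>

// Write every landmark of every detection to a single file,
// opening the stream only once so earlier rows are not lost.
void dump_landmarks(const std::vector<dlib::full_object_detection>& shapes,
                    const std::string& path)
{
    std::ofstream outputfile(path);          // opened once, outside the loops
    for (const auto& shape : shapes)
        for (unsigned long k = 0; k < shape.num_parts(); ++k)
            outputfile << shape.part(k).x() << " "
                       << shape.part(k).y() << "\n";
}

Called once after the detection loop, e.g. dump_landmarks(shapes, "data.txt");, the file should contain the same coordinates the terminal shows; the only cosmetic difference is that printing a dlib point with operator<< formats it as (x, y), while the file stores plain x y pairs.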
No.3 Revision

eye landmark points

Hi

I'm using the dlib facial landmark detector to detect eye blinks. How can the eye landmarks be exported to a file?

I need the eye landmarks to calculate the ratio between the height and width of the eye, and then use an SVM to classify blinks.
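
For the height-to-width ratio, the quantity usually used for blink detection is the eye aspect ratio (EAR); for the right eye (parts 36-41) it is EAR = (||p37 - p41|| + ||p38 - p40||) / (2 * ||p36 - p39||). A minimal sketch of that computation is below, assuming a full_object_detection named shape; the helper name right_eye_ear is mine, and note that the numerator and the denominator each need their own parentheses.

#include <dlib/image_processing.h>
#include <cmath>

// Eye aspect ratio for the right eye (dlib 68-point parts 36-41):
// sum of the two vertical eye openings over twice the horizontal width.
double right_eye_ear(const dlib::full_object_detection& shape)
{
    auto dist = [&](unsigned long a, unsigned long b) {
        const double dx = shape.part(a).x() - shape.part(b).x();
        const double dy = shape.part(a).y() - shape.part(b).y();
        return std::sqrt(dx * dx + dy * dy);
    };
    return (dist(37, 41) + dist(38, 40)) / (2.0 * dist(36, 39));
}

The EAR stays roughly constant while the eye is open and drops sharply during a blink, which is what makes it a useful per-frame feature for an SVM blink classifier.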

Update: when I try to write the landmark points to a file, the values that get saved differ from the landmarks displayed in the terminal window. How can I fix this?

Thanks

#include <dlib/opencv.h>
#include <opencv2/highgui/highgui.hpp>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>
#include <iostream>
#include <fstream>
#include <cmath>

using namespace dlib;
using namespace std;

int main()
{
    try
    {
        cv::VideoCapture cap(0);
        if (!cap.isOpened())
        {
            cerr << "Unable to connect to camera" << endl;
            return 1;
        }

        image_window win;
        frontal_face_detector detector = get_frontal_face_detector();
        shape_predictor pose_model;
        deserialize("shape_predictor_68_face_landmarks.dat") >> pose_model;

        while (!win.is_closed())
        {
            // Grab a frame and wrap it as a dlib image (no copy is made,
            // so temp must stay alive while cimg is used).
            cv::Mat temp;
            cap >> temp;
            cv_image<bgr_pixel> cimg(temp);

            // Detect faces.
            std::vector<rectangle> faces = detector(cimg);

            // Find the pose of each face.
            std::vector<full_object_detection> shapes;
            ofstream outputfile;
            outputfile.open("data1.csv");

            for (unsigned long i = 0; i < faces.size(); ++i)
            {
                full_object_detection shape = pose_model(cimg, faces[i]);
                cout << "number of parts: " << shape.num_parts() << endl;

                cout << "Eye Landmark points for right eye : " << endl;
                cout << "pixel position of 36 part: " << shape.part(36) << endl;
                cout << "pixel position of 37 part: " << shape.part(37) << endl;
                cout << "pixel position of 38 part: " << shape.part(38) << endl;
                cout << "pixel position of 39 part: " << shape.part(39) << endl;
                cout << "pixel position of 40 part: " << shape.part(40) << endl;
                cout << "pixel position of 41 part: " << shape.part(41) << endl;

                cout << endl;

                cout << "Eye Landmark points for left eye : " << endl;
                cout << "pixel position of 42 part: " << shape.part(42) << endl;
                cout << "pixel position of 43 part: " << shape.part(43) << endl;
                cout << "pixel position of 44 part: " << shape.part(44) << endl;
                cout << "pixel position of 45 part: " << shape.part(45) << endl;
                cout << "pixel position of 46 part: " << shape.part(46) << endl;
                cout << "pixel position of 47 part: " << shape.part(47) << endl;

                shapes.push_back(shape);

                // Vertical and horizontal eye distances for the right eye.
                double P37_41_x = shape.part(37).x() - shape.part(41).x();
                double P37_41_y = shape.part(37).y() - shape.part(41).y();
                double p37_41_sqrt = sqrt((P37_41_x * P37_41_x) + (P37_41_y * P37_41_y));

                double P38_40_x = shape.part(38).x() - shape.part(40).x();
                double P38_40_y = shape.part(38).y() - shape.part(40).y();
                double p38_40_sqrt = sqrt((P38_40_x * P38_40_x) + (P38_40_y * P38_40_y));

                double P36_39_x = shape.part(36).x() - shape.part(39).x();
                double P36_39_y = shape.part(36).y() - shape.part(39).y();
                double p36_39_sqrt = sqrt((P36_39_x * P36_39_x) + (P36_39_y * P36_39_y));

                // Eye aspect ratio: (|p37-p41| + |p38-p40|) / (2 * |p36-p39|).
                double EAR = (p37_41_sqrt + p38_40_sqrt) / (2 * p36_39_sqrt);

                cout << "EAR value = " << EAR << endl;

                // Write all 68 landmark coordinates of this face to data1.csv.
                for (unsigned long k = 0; k < shape.num_parts(); ++k)
                {
                    outputfile << shape.part(k).x() << " " << shape.part(k).y() << endl;
                }
            }

            win.clear_overlay();
            win.set_image(cimg);
            win.add_overlay(render_face_detections(shapes));
        }
    }
    catch (serialization_error& e)
    {
        cout << "You need dlib's default face landmarking model file to run this example." << endl;
        cout << "You can get it from the following URL: " << endl;
        cout << "   http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2" << endl;
        cout << endl << e.what() << endl;
    }
    catch (exception& e)
    {
        cout << e.what() << endl;
    }
}