
Eye Blink detection OpenCV C++

I am trying to write blink detection, but it does not work properly. It finds the face and the eyes correctly. Then I try to find a circle in the eye area (the pupil), but it is not always found, and when it is found a blink is counted even though there is none (so the counter keeps going up). I have tried different methods and filters (HoughCircles, Canny, threshold, medianBlur, smooth), but nothing changes. I hope someone can help me, and sorry for my bad English. Here is my code:

#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <iomanip>
#include <sstream>
#include <string>

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>

// Include OpenCV's C++ interface
#include "opencv2/opencv.hpp"


const float eyeTop = 0.26f;     //y
const float eyeSide = 0.16f;  //x
const float eyeHeight = 0.28f;  //h
const float eyeWidth = 0.65f;       //w
const int calibrationDefault = 100;

int blinknumber =0;
int calibrationFace = calibrationDefault;


bool leftEyeOpen = true;
bool rightEyeOpen = true;

using namespace cv;
using namespace std;

const std::string casceye_name = "C:\\Users\\Hossein Hezarpisheh\\Documents\\Visual Studio 2010\\Projects\\projek1\\haarcascade_mcs_eyepair_big.xml";
const std::string face_cascade_name = "C:\\Users\\Hossein Hezarpisheh\\Documents\\Visual Studio 2010\\Projects\\projek1\\haarcascade_frontalface_alt.xml";

Mat lastFace;
Mat actualFace;

void headTracing(Mat grayImage, Mat image, CascadeClassifier casceye, CascadeClassifier cascFace, Rect &faceArea);
Rect detectLargestObject(Mat grayImage, CascadeClassifier cascFace);
void eyeTracking(Mat &actualFace, Mat &lastFace,int &blinknumber);
void getEyes(Mat &face, Mat &eye);

namespace patch
{
    template <typename T> string to_string(T n)
    {
        ostringstream stm;
        stm << n;
        return stm.str();
    }
}





// main   

int main()
{
 Rect faceArea; 
 CascadeClassifier cascFace, casceye;

if (!cascFace.load(face_cascade_name)){ printf("--(!)Error loading face cascade\n"); return -1; };
if (!casceye.load(casceye_name)){ printf("--(!)Error loading eyes cascade\n"); return -1; };


cout << "\n\tESC - quit the program\n\tc - reset the counter to 0\n\n";

namedWindow("Blinzel Erkennung", CV_WINDOW_AUTOSIZE);

VideoCapture capture(0);
if (!capture.isOpened())
{
cout<<"Kamera wurde nicht gefunden!"<<endl;
return 1;
}

Mat Image;

while (1)
{
    Mat GrayImage;
    capture >> Image;

    if (Image.empty()) {
        continue;
    }

    flip(Image, Image, 1);
    cvtColor(Image, GrayImage, CV_BGR2GRAY);

    headTracing(GrayImage, Image, casceye, cascFace, faceArea);

    switch (waitKey(2)) {
    case 27:                                    // ESC key - quit the program
        return 0;
        break;
    case 120:                                   // x key - force face recalibration
        calibrationFace = 0;
        break;
    case 99:                                    // c key - reset the counter to 0
        leftEyeOpen = true;
        rightEyeOpen = true;
        blinknumber = 0;
        break;
    }
}
return 0;
}




void calcFlow(const Mat& flow, Mat& cflowmap, int step, int &globalMoveX, int &globalMoveY)
{
    // accumulate in float so the sub-pixel flow values are not truncated
    float localMoveX = 0;
    float localMoveY = 0;

    for (int y = 0; y < cflowmap.rows; y += step)
    {
        for (int x = 0; x < cflowmap.cols; x += step)
        {
            const Point2f& fxy = flow.at<Point2f>(y, x);

            localMoveX = localMoveX + fxy.x;
            localMoveY = localMoveY + fxy.y;
        }
    }

    // average motion of the whole face patch, scaled by 2
    globalMoveX = (int)((localMoveX / (cflowmap.cols * cflowmap.rows)) * 2);
    globalMoveY = (int)((localMoveY / (cflowmap.rows * cflowmap.cols)) * 2);
}


void headTracing(Mat grayImage, Mat image, CascadeClassifier casceye, CascadeClassifier cascFace, Rect &faceArea) {

Rect face = detectLargestObject(grayImage, cascFace);
if (face.width == 0 && face.height == 0) {
imshow("Ergebnis", image);                                  
return;                                                             
}

calibrationFace = calibrationFace - 1;

if (faceArea.height == 0 || calibrationFace < 1) {           
faceArea = face;
lastFace = grayImage(face);
calibrationFace = calibrationDefault;                           
}
else {                                                      

actualFace = grayImage(faceArea);

Mat flow, cflow;
calcOpticalFlowFarneback(lastFace, actualFace, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

cvtColor(lastFace, cflow, CV_GRAY2BGR);

int globalMoveX, globalMoveY;

calcFlow(flow, cflow, 1, globalMoveX, globalMoveY);


faceArea.x = faceArea.x + globalMoveX;      
faceArea.y = faceArea.y + globalMoveY;

if (faceArea.x < 0) {                                       
    faceArea.x = 0;
}
if (faceArea.y < 0) {
    faceArea.y = 0;
}

if (faceArea.x + faceArea.width > image.size().width - 1) {     
    faceArea.x = image.size().width - faceArea.width - 1;
}
if (faceArea.y + faceArea.height > image.size().height - 1) {
    faceArea.y = image.size().height - faceArea.height - 1;
}
//rectangle(image,faceArea, 12);                
actualFace = grayImage(faceArea);               


    eyeTracking(actualFace, lastFace, blinknumber);             // now we have two stabilized frames (current & previous), so the motion can be computed
    swap(lastFace, actualFace);                                  // the current frame becomes the previous frame and vice versa
}



putText(image,patch::to_string(blinknumber), cvPoint(520, 45), FONT_HERSHEY_COMPLEX_SMALL, 1.5,   cvScalar(100, 100, 255), 1, CV_AA);

imshow("Ergebnis", image);                                      //Ergebniss anzeigen
}


Rect detectLargestObject(Mat grayImage, CascadeClassifier cascFace) {

    Rect value;

    vector<Rect> faces;
    cascFace.detectMultiScale(grayImage, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE | CV_HAAR_FIND_BIGGEST_OBJECT, Size(150, 150), Size(300, 300));
    if (faces.size() > 0) {
        return faces[0];
    }
    else {
        return value;
    }
}




void eyeTracking(Mat &actualFace, Mat &lastFace,int &blinknumber) {

Mat eyeActual;
getEyes(actualFace, eyeActual);
Mat eyeActualGray;
cvtColor(eyeActual, eyeActualGray, COLOR_GRAY2BGR);     // BGR copy of the eye ROI so the detected circles can be drawn in colour

//medianBlur(eyeActual, eyeActual,5);
//cvtColor(eyeActual, eyeActualGray, COLOR_BGR2GRAY);

namedWindow("Kreis", CV_WINDOW_AUTOSIZE);


//Canny(eyeActual,eyeActual,5,70,3);
medianBlur(eyeActual, eyeActual,5);
//threshold(eyeActual,eyeActual,50,200,THRESH_BINARY);
//vector<vector<Point> > contours;

vector <Vec3f> circles;

//findContours(eyeActual.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
//drawContours(eyeActual, contours, -1, CV_RGB(255,255,255), -1);

HoughCircles( eyeActual, circles, CV_HOUGH_GRADIENT, 1, eyeActual.rows/8, 50,25,5,15 );

for (size_t i = 0; i < circles.size(); i++)
{
    Vec3i c = circles[i];
    //circle(eyeActualGray, Point(c[0], c[1]), c[2], Scalar(0,0,255), 2);
    circle(eyeActualGray, Point(c[0], c[1]), 2, Scalar(0,255,0), 2);

    blinknumber = blinknumber + 1;
}
imshow("Kreis", eyeActualGray);

}

void getEyes(Mat &face, Mat &eye) {


Size faceSize = face.size();

int eye_area_width = faceSize.width * eyeWidth;
int eye_area_height = faceSize.width * eyeHeight;
int eye_area_top = faceSize.height * eyeTop;

Rect rightEyeArea(faceSize.width * eyeSide, eye_area_top, eye_area_width, eye_area_height);
eye = face(rightEyeArea);
}
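
To make clearer what I am trying to achieve with the counter: I only want to count a complete blink (eye closes and opens again), not every frame in which a circle is found. This is only a rough, untested sketch of the counting logic I have in mind (it assumes the pupil circle is found while the eye is open and not found while it is closed; updateBlinkCounter, pupilVisible and eyeWasOpen are just names I made up for the sketch):

// Sketch only: count a blink on the closed -> open transition instead of
// incrementing on every circle that HoughCircles returns.
// 'pupilVisible' would be !circles.empty() for the current frame;
// 'eyeWasOpen' is a persistent state flag like leftEyeOpen above.
void updateBlinkCounter(bool pupilVisible, bool &eyeWasOpen, int &blinknumber)
{
    if (eyeWasOpen && !pupilVisible) {
        eyeWasOpen = false;                 // eye just closed, remember it, do not count yet
    }
    else if (!eyeWasOpen && pupilVisible) {
        blinknumber = blinknumber + 1;      // eye opened again -> one complete blink
        eyeWasOpen = true;
    }
}

That way the counter should only go up once per blink, but I am not sure whether the circle detection is stable enough for this to work.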