What is the right/best way for eye detection? [closed]

asked 2015-11-08 05:04:39 -0600

Noman

updated 2015-11-08 05:06:40 -0600

I want to detect eyes inside a detected face in webcam footage. I am using the haarcascade eye and split-eye cascades to detect the eyes, but the detection is very unstable: the detected rectangle keeps changing size, jumps around, and half of the time nothing is detected at all. It is too unstable for aligning and cropping the face correctly. Am I doing something wrong, or is there a better way to detect eyes?

I am currently following the face recognition recipe from the Mastering OpenCV book. Here's my code:

        Mat topLeftOfFace;
        Mat topRightOfFace;

        // Getting the left-eye and right-eye portions of the face image

        getEyeRegions(biggestFace,"eye", topLeftOfFace, topRightOfFace,leftX,rightX,topY);

        // Detecting the left eye with the cascade

        newScale = topLeftOfFace.cols;
        Rect leftEye = detectCascadeSingle(topLeftOfFace, eyeDetector, newScale);

        if (leftEye.width <= 0){
            getEyeRegions(biggestFace, "split_eye", topLeftOfFace, Mat(0,0,CV_8U),leftX,rightX,topY);
            newScale = topLeftOfFace.cols;
            leftEye = detectCascadeSingle(topLeftOfFace, leftEyeDetector, newScale);
        }
        Point leftEyeCenter = Point(-1,-1);

        if (leftEye.width > 0){
            leftEyeCenter.x = leftEye.x + leftEye.width / 2 + leftX;
            leftEyeCenter.y = leftEye.y + leftEye.height / 2 + topY;
        }
        // Detecting the right eye with the cascade

        newScale = topRightOfFace.cols;
        Rect rightEye = detectCascadeSingle(topRightOfFace, eyeDetector, newScale);

        if (rightEye.width <= 0){
            getEyeRegions(biggestFace, "split_eye", Mat(0, 0, CV_8U), topRightOfFace,leftX, rightX, topY);
            newScale = topRightOfFace.cols;
            rightEye = detectCascadeSingle(topRightOfFace, rightEyeDetector, newScale);
        }

        Point rightEyeCenter = Point(-1, -1);

        if (rightEye.width > 0){
            rightEyeCenter.x = rightEye.x + rightEye.width / 2 + rightX;
            rightEyeCenter.y = rightEye.y + rightEye.height / 2 + topY;
        }
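
For reference, here is a minimal sketch of how the two eye centers could then be used to level the eyes before cropping, roughly in the spirit of the book's geometric transform step (the names angleDeg, eyesMid and alignedFace are illustrative, not taken from the code above):

    if (leftEyeCenter.x >= 0 && rightEyeCenter.x >= 0){
        // Roll angle of the face, taken from the line joining the two eye centers
        double dy = (double)(rightEyeCenter.y - leftEyeCenter.y);
        double dx = (double)(rightEyeCenter.x - leftEyeCenter.x);
        double angleDeg = atan2(dy, dx) * 180.0 / CV_PI;

        // Rotate around the midpoint between the eyes so they end up horizontal
        Point2f eyesMid((leftEyeCenter.x + rightEyeCenter.x) * 0.5f,
                        (leftEyeCenter.y + rightEyeCenter.y) * 0.5f);
        Mat rot = getRotationMatrix2D(eyesMid, angleDeg, 1.0);

        Mat alignedFace;
        warpAffine(biggestFace, alignedFace, rot, biggestFace.size());
    }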

And the getEyeRegions function:

void getEyeRegions(Mat biggestFace, string mode, Mat &topLeftOfFace, Mat &topRightOfFace, int &leftX, int &rightX, int &topY){

int widthX;
int heightY;

// Fractions of the face width/height that define the eye search regions
float EYE_SX;   // x offset of the region
float EYE_SY;   // y offset of the region
float EYE_SW;   // region width
float EYE_SH;   // region height

if (mode == "eye"){
    EYE_SX = 0.16;
    EYE_SY = 0.26;
    EYE_SW = 0.30;
    EYE_SH = 0.28;
}
else if (mode == "split_eye"){
    EYE_SX = 0.12;
    EYE_SY = 0.17;
    EYE_SW = 0.37;
    EYE_SH = 0.36;
}

leftX = cvRound(biggestFace.cols * EYE_SX);
topY = cvRound(biggestFace.rows * EYE_SY);
widthX = cvRound(biggestFace.cols * EYE_SW);
heightY = cvRound(biggestFace.rows * EYE_SH);
rightX = cvRound(biggestFace.cols * (1.0 - EYE_SX - EYE_SW));

topLeftOfFace = biggestFace(Rect(leftX, topY, widthX, heightY));
topRightOfFace = biggestFace(Rect(rightX, topY, widthX, heightY));
}
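
To make the fractions concrete: for a 200x200 face ROI in "eye" mode this gives leftX = 32, topY = 52, widthX = 60, heightY = 56 and rightX = 108, i.e. two 60x56 search windows placed roughly where the eyes sit in a frontal face.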

The detectCascadeSingle function:

cv::Rect detectCascadeSingle(Mat img, CascadeClassifier faceDetector, int scaledWidth){
if (img.channels() == 3){
    cvtColor(img, img, CV_BGR2GRAY);
}
else if (img.channels() == 4){
    cvtColor(img, img, CV_BGRA2GRAY);
}
int DETECTION_WIDTH = scaledWidth;

// Cast to float, otherwise this is integer division
float scale = img.cols / (float)DETECTION_WIDTH;

// Shrink the image if it is wider than the requested detection width
if (img.cols > DETECTION_WIDTH){
    int scaledHeight = cvRound(img.rows / scale);
    resize(img, img, Size(DETECTION_WIDTH, scaledHeight));
}


// Histogram equalization to reduce the effect of lighting
equalizeHist(img, img);


int flags = CASCADE_SCALE_IMAGE;   // (unused here; biggestFlag is passed below)

// Look only for the single biggest object, using a rough (faster) search
int biggestFlag = CASCADE_FIND_BIGGEST_OBJECT | CASCADE_DO_ROUGH_SEARCH;

// Smallest object size that will be searched for
Size minFeatureSize(5, 5);

// How much the search window grows between scales (10% per step)
float searchScaleFactor = 1.1f;

// How many overlapping detections are needed to accept an object
int minNeighbors = 4;

std::vector<Rect> faces;

faceDetector.detectMultiScale(img, faces, searchScaleFactor, minNeighbors, biggestFlag, minFeatureSize);


// Scale the detections back up; check the original scale, since img itself was resized in place above
if (scale > 1.0f){
    for (int i = 0; i < (int)faces.size(); i++){
        faces[i].x = cvRound(faces[i].x * scale);
        faces[i].y = cvRound(faces[i].y * scale);
        faces[i].width = cvRound(faces[i].width * scale);
        faces[i].height = cvRound(faces[i].height * scale);
    }
}

//Keeping the face inside the border 

for (int i = 0; i < (int)faces.size(); i++){
    if (faces[i].x < 0){
        faces[i].x = 0;
    }
    if (faces[i].y ...

Closed for the following reason: the question is answered, right answer was accepted by sturkmen
close date 2020-10-13 14:09:02.497098

Comments

You need to add temporal smoothing, for example by using a Kalman filter over the center position of the eye. You need to average out the center position over a range of, let's say, 10 frames.

StevenPuttemans ( 2015-11-08 06:17:56 -0600 )
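
A minimal sketch of the averaging idea from the comment above, assuming a (-1,-1) center means detection failed in that frame; the helper name smoothEyeCenter, the global history buffer and the window of 10 frames are illustrative only, and cv::KalmanFilter would be the more principled alternative:

    #include <deque>

    // Keep the last few valid centers and report their average as the stable position.
    static std::deque<cv::Point> eyeHistory;    // use one buffer per eye in real code
    const size_t SMOOTH_FRAMES = 10;

    cv::Point smoothEyeCenter(cv::Point detected)
    {
        if (detected.x >= 0){                   // skip frames where detection failed
            eyeHistory.push_back(detected);
            if (eyeHistory.size() > SMOOTH_FRAMES)
                eyeHistory.pop_front();
        }
        if (eyeHistory.empty())
            return cv::Point(-1, -1);           // nothing seen yet

        cv::Point sum(0, 0);
        for (size_t i = 0; i < eyeHistory.size(); i++)
            sum += eyeHistory[i];

        return cv::Point(sum.x / (int)eyeHistory.size(), sum.y / (int)eyeHistory.size());
    }

Feeding leftEyeCenter (and, with a second buffer, rightEyeCenter) through this every frame gives positions that drift far less than the raw per-frame detections.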