
Dasein's profile - activity

2020-09-22 17:55:48 -0600 received badge  Student (source)
2016-04-28 11:14:05 -0600 asked a question increasing fps

Hello guys, I am trying to compile this code (https://github.com/trishume/eyeLike/tree/master/src) for eye center localization. However, my fps is too low (around 3). Do you have any ideas about how to optimize it? (One possible speed-up is sketched after the code below.)

Here is the code:

#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <queue>
#include "constant.h"
#include "eye_centerr.h"

#define kEyeLeft true
#define kEyeRight false

using namespace cv; // the fps code below uses unqualified OpenCV names (getTickCount, putText, ...)

/** Function Headers */
void detectAndDisplay(cv::Mat frame);

/** Global variables */
//-- Note, either copy these two files from opencv/data/haarcascades to your current folder, or change these locations
cv::String face_cascade_name = "haarcascade_frontalface_alt.xml";
cv::CascadeClassifier face_cascade;
std::string main_window_name = "Capture - Face detection";
std::string face_window_name = "Capture - Face";
cv::RNG rng(12345);
cv::Mat debugImage;
cv::Mat skinCrCbHist = cv::Mat::zeros(cv::Size(256, 256), CV_8UC1);

/**
* @function main
*/
int main(int argc, const char** argv) {
    cv::VideoCapture capture;
    cv::Mat frame;

    // Load the cascades
    if (!face_cascade.load(face_cascade_name)) {
        printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n");
        return -1;
    }

    cv::namedWindow(main_window_name, CV_WINDOW_NORMAL);
    //cv::moveWindow(main_window_name, 400, 100);
    cv::namedWindow(face_window_name, CV_WINDOW_NORMAL);
    //cv::moveWindow(face_window_name, 10, 100);

    // Read the video stream
    if (argc > 1)
        capture.open(argv[1]);
    else
        capture.open(0);

    if (capture.isOpened()) {
        while (true) {
            double t = (double)getTickCount();
            capture >> frame;
            frame.copyTo(debugImage);

            // Apply the classifier to the frame
            if (!frame.empty()) {
                detectAndDisplay(frame);
            }
            else {
                printf(" --(!) No captured frame -- Break!");
                break;
            }

            t = (double)getTickCount() - t;
            double fps = getTickFrequency() / t;
            static double avgfps = 0;
            static int nframes = 0;
            nframes++;
            double alpha = nframes > 50 ? 0.01 : 1. / nframes;
            avgfps = avgfps*(1 - alpha) + fps*alpha;

            putText(frame, format("fps: %.1f - %d", avgfps, nframes), Point(20, 30),
                FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 255, 0), 2);

            imshow(main_window_name, frame);

            int c = cv::waitKey(10);
            if ((char)c == 'c') { break; }
            if ((char)c == 'f') {
                imwrite("frame.png", frame);
            }
        }
    }

    return 0;
}

void findEyes(cv::Mat frame_gray, cv::Rect face) {
    cv::Mat faceROI = frame_gray(face);
    cv::Mat debugFace = faceROI;

    if (kSmoothFaceImage) {
        double sigma = kSmoothFaceFactor * face.width;
        GaussianBlur(faceROI, faceROI, cv::Size(0, 0), sigma);
    }
    //-- Find eye regions and draw them
    int eye_region_width = face.width * (kEyePercentWidth / 100.0);
    int eye_region_height = face.width * (kEyePercentHeight / 100.0);
    int eye_region_top = face.height * (kEyePercentTop / 100.0);
    cv::Rect leftEyeRegion(face.width*(kEyePercentSide / 100.0),
        eye_region_top, eye_region_width, eye_region_height);
    cv::Rect rightEyeRegion(face.width - eye_region_width - face.width*(kEyePercentSide / 100.0),
        eye_region_top, eye_region_width, eye_region_height);

    //-- Find Eye Centers
    cv::Point leftPupil = findEyeCenter(faceROI, leftEyeRegion, "Left Eye");
    cv::Point rightPupil = findEyeCenter(faceROI, rightEyeRegion, "Right Eye");

    rightPupil.x += rightEyeRegion.x;
    rightPupil.y += rightEyeRegion.y;
    leftPupil.x += leftEyeRegion.x;
    leftPupil.y += leftEyeRegion.y;
    // draw eye centers
    circle(debugFace, rightPupil, 3, 1234);
    circle(debugFace, leftPupil, 3, 1234);

    //-- Find Eye Corners

    imshow(face_window_name, faceROI);
    //  cv::Rect roi( cv::Point( 0, 0 ), faceROI.size());
    //  cv::Mat destinationROI = debugImage( roi );
    //  faceROI.copyTo( destinationROI );
}

/**
* @function detectAndDisplay
*/
void detectAndDisplay(cv::Mat frame) {
    std::vector<cv::Rect> faces;
    //cv::Mat frame_gray;

    std::vector<cv::Mat> rgbChannels(3);
    cv::split(frame, rgbChannels);
    cv::Mat frame_gray = rgbChannels[2];

    //cvtColor( frame, frame_gray, CV_BGR2GRAY );
    //equalizeHist( frame_gray, frame_gray );
    //cv ...
(more)
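For reference, a minimal sketch of one common speed-up for this kind of pipeline (not part of the eyeLike code; the function name, the 0.5 scale factor, and the cascade flags are illustrative assumptions): run the face cascade on a downscaled copy of the grayscale frame and map the rectangle back, since detectMultiScale dominates the per-frame cost and scales badly with image size. Building in Release rather than Debug mode usually makes a large difference as well.

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <vector>

using namespace cv;

// Sketch: detect the largest face on a half-resolution copy of the frame and
// map the result back to full-resolution coordinates. "cascade" is assumed to
// be an already-loaded CascadeClassifier; the 0.5 scale is an illustrative choice.
Rect detectFaceDownscaled(CascadeClassifier& cascade, const Mat& frame_gray, double scale = 0.5)
{
    Mat small;
    resize(frame_gray, small, Size(), scale, scale, INTER_LINEAR);

    std::vector<Rect> faces;
    cascade.detectMultiScale(small, faces, 1.1, 2,
                             CASCADE_SCALE_IMAGE | CASCADE_FIND_BIGGEST_OBJECT,
                             Size(40, 40));
    if (faces.empty())
        return Rect();

    // scale the rectangle back to the original frame size
    Rect r = faces[0];
    return Rect(cvRound(r.x / scale), cvRound(r.y / scale),
                cvRound(r.width / scale), cvRound(r.height / scale));
}

Another cheap win is to run the detector only every few frames and reuse the last rectangle in between.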
2016-04-27 04:11:02 -0600 received badge  Enthusiast
2016-04-26 07:27:42 -0600 received badge  Editor (source)
2016-04-26 06:55:06 -0600 asked a question hough circle transform error

Hello guys, I am trying to use the Hough circle transform to find the pupil in an eye ROI, but it doesn't work. Do you have any idea how to fix this problem? (A HoughCircles sketch follows the code below.) Here is the code:

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
String window_name2 = "right eye";
String window_name3 = "left eye";



Rect shrinkRect(Rect rect, int width_percent, int height_percent)
{
    if (width_percent > 100) width_percent = 100;
    if (height_percent > 100) height_percent = 100;

    Rect newrect;
    newrect.width = (rect.width * width_percent) / 100;
    newrect.height = (rect.height * height_percent) / 100;
    newrect.x = rect.x + (rect.width - newrect.width) / 2;
    newrect.y = rect.y + (rect.height - newrect.height) / 2;

    return newrect;
}

Rect expandRect(Rect rect, int width_percent, int height_percent)
{
    // not tested strongly
    Rect newrect;
    newrect.width = rect.width + ((rect.width * width_percent) / 100);
    newrect.height = rect.height + ((rect.height * height_percent) / 100);
    newrect.x = rect.x + (rect.width - newrect.width) / 2;
    newrect.y = rect.y + (rect.height - newrect.height) / 2;

    return newrect;
}

void updateEyeRects(vector<Rect> eyes, Rect& leftEyeRect, Rect& rightEyeRect)
{
    if (eyes.size() < 2) return;
    Rect intersection = eyes[0] & eyes[1];
    if (intersection.width > 1) return;

    if (eyes[0].x < eyes[1].x)
    {
        leftEyeRect = eyes[0];
        rightEyeRect = eyes[1];
    }
    else
    {
        leftEyeRect = eyes[1];
        rightEyeRect = eyes[0];
    }
}

std::vector<Rect> detectEyes(Mat frame, Rect faceRect)
{
    std::vector<Rect> faces;
    if (faceRect.width > 0)
    {
        faces.push_back(faceRect);
    }

    std::vector<Rect> eyes;
    Mat frame_gray;

    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);

    //-- Detect faces
    if (faces.size() == 0)
    {
        equalizeHist(frame_gray, frame_gray);
        face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0, Size(80, 80));
    }

    for (size_t i = 0; i < faces.size(); i++)
    {
        faces[i].height -= faces[i].height / 2;
        faces[i].y += faces[i].height / 3;
        Mat faceROI = frame_gray(faces[i]);
        equalizeHist(faceROI, faceROI);
        //-- In each face, detect eyes
        eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));

        for (size_t j = 0; j < eyes.size(); j++)
        {
            eyes[j].x += faces[i].x;
            eyes[j].y += faces[i].y;
        }
    }
    return eyes;
}






int main(int argc, char** argv)
{
    VideoCapture capture;
    Mat frame;

    //-- 1. Load the cascade
    if (!face_cascade.load(face_cascade_name))
    {
        printf("--(!)Error loading face cascade\n");
        return -1;
    };
    if (!eyes_cascade.load(eyes_cascade_name))
    {
        printf("--(!)Error loading eyes cascade\n");
        return -1;
    };

    // Read the video stream or open the web cam
    if (argc > 1)
        capture.open(argv[1]);
    else
        capture.open(0);

    if (!capture.isOpened())
    {
        printf("--(!)Error opening video capture\n");
        return -1;
    }
    Mat left_frame, right_frame;
    Rect leftEyeRect;
    Rect rightEyeRect;
    vector<Rect> eyes;
    int fail_count = 0;

    while (capture.read(frame))
    {
        double t = (double)getTickCount();
        if (frame.empty())
        {
            printf(" --(!) No captured frame -- Break!");
            break;
        }

        if ((eyes.size() < 2) && !(leftEyeRect.width && rightEyeRect.width))
        {
            putText(frame, "Searching for an eye pair..", Point(20, 20), 0, 0.6, Scalar(0, 255, 0));
            eyes = detectEyes(frame, Rect());
            updateEyeRects(eyes, leftEyeRect, rightEyeRect);
        }
        else
        {
            Rect eyesRect(leftEyeRect | rightEyeRect);
            eyesRect = expandRect(eyesRect, 30, 300);
            eyesRect = eyesRect & Rect(0, 0, frame.cols, frame ...
(more)
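For reference, a minimal sketch of how HoughCircles is often applied to a grayscale eye ROI (the function name, the blur kernel, and all parameter values below are illustrative assumptions, not the accepted fix): blur first so spurious edges do not flood the accumulator, and bound minRadius/maxRadius to plausible pupil sizes relative to the ROI.

#include "opencv2/imgproc/imgproc.hpp"
#include <vector>

using namespace cv;

// Sketch: locate a pupil candidate in a grayscale eye ROI with HoughCircles.
// dp, param1/param2 and the radius bounds are illustrative assumptions.
Point findPupilHough(const Mat& eyeROI)
{
    Mat blurred;
    GaussianBlur(eyeROI, blurred, Size(5, 5), 2);   // suppress noise before the gradient-based detector

    std::vector<Vec3f> circles;
    HoughCircles(blurred, circles, CV_HOUGH_GRADIENT,
                 2,                  // dp: accumulator at half resolution
                 blurred.rows / 4,   // minimum distance between circle centres
                 100, 20,            // Canny high threshold / accumulator threshold
                 eyeROI.rows / 8,    // minRadius: rough lower bound for a pupil
                 eyeROI.rows / 3);   // maxRadius: rough upper bound for a pupil

    if (circles.empty())
        return Point(-1, -1);        // nothing found

    // take the strongest candidate
    return Point(cvRound(circles[0][0]), cvRound(circles[0][1]));
}

In practice param2 (the accumulator threshold) is the value to tune: too high and no circle is returned, too low and eyelid and iris edges produce false circles.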
2016-04-23 14:47:50 -0600 asked a question adding pupil detection

Hello guys, I am a beginner at OpenCV. I found the eye regions via Haar cascades and cropped them. However, now I am trying to find the pupils.

Do you have any suggestions on how to add pupil detection to this code? (A simple dark-blob sketch follows the code below.)

Thank you.

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;

int main(int argc, const char** argv)
{
    CascadeClassifier eye;
    CascadeClassifier righteye;
    CascadeClassifier lefteye;
    eye.load("haarcascade_mcs_eyepair_small.xml");
    righteye.load("haarcascade_mcs_lefteye.xml");
    lefteye.load("haarcascade_mcs_righteye.xml");

    VideoCapture vid;
    vid.open(0);
    if (!vid.isOpened())
    {
        cout << "webcam could not be opened" << endl;
        system("Pause");
        return -1;
    }

    Mat frame;
    Mat grires;
    Mat frame2;
    Mat frame3;
    namedWindow("algilanan", WINDOW_AUTOSIZE);
    namedWindow("algilanan2", WINDOW_AUTOSIZE);
    namedWindow("algilanan3", WINDOW_AUTOSIZE);

    while (true)
    {
        vid >> frame;
        cvtColor(frame, grires, CV_BGR2GRAY);  // convert the image to grayscale
        //equalizeHist(grires, grires); // equalize the histogram if desired
        vector<Rect> eyepair;
        eye.detectMultiScale(grires, eyepair, 1.1, 3, 0, Size(30, 30));
        for (int i = 0; i < eyepair.size(); i++)
        {
            rectangle(frame, eyepair[i], Scalar(0, 255, 0));
            Mat ROI = grires(eyepair[i]);
            vector<Rect> reye;
            vector<Rect> leye;
            righteye.detectMultiScale(ROI, reye, 1.1, 3, 0, Size(30, 30));
            lefteye.detectMultiScale(ROI, leye, 1.1, 3, 0, Size(30, 30));
            for (int t = 0; t < reye.size(); t++)
            {
                bool isRightEye = reye[t].x > eyepair[i].width / 2;
                reye[t].x += eyepair[i].x;
                reye[t].y += eyepair[i].y;
                rectangle(frame, reye[t], Scalar(0, 255, 0));

                if (isRightEye)
                    frame3 = grires(reye[t]);
            }
            for (int z = 0; z < leye.size(); z++)
            {
                bool isLeftEye = leye[z].x < eyepair[i].width / 2;
                leye[z].x += eyepair[i].x;
                leye[z].y += eyepair[i].y;
                rectangle(frame, leye[z], Scalar(0, 255, 0));

                if (isLeftEye)
                    frame2 = grires(leye[z]);
            }
        }

        imshow("algilanan", frame);
        if (!frame2.empty())
            imshow("algilanan2", frame2);

        if (!frame3.empty())
            imshow("algilanan3", frame3);
        waitKey(33);
    }
    return 0;
}
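For reference, a minimal sketch of one simple pupil estimate that could be run on the cropped eye images (frame2 / frame3) above; the helper name and the threshold value 30 are illustrative assumptions. It relies on the pupil usually being the darkest blob in the eye ROI.

#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

using namespace cv;

// Sketch: estimate the pupil centre in a grayscale eye ROI as the centroid of
// the largest dark blob. The threshold (30) is an illustrative assumption and
// usually needs tuning per camera and lighting.
Point findPupilDarkBlob(const Mat& eyeGray)
{
    Mat dark;
    threshold(eyeGray, dark, 30, 255, THRESH_BINARY_INV);   // keep only very dark pixels

    std::vector<std::vector<Point> > contours;
    findContours(dark, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    if (contours.empty())
        return Point(-1, -1);

    // pick the largest dark region
    size_t best = 0;
    double bestArea = 0;
    for (size_t i = 0; i < contours.size(); i++)
    {
        double a = contourArea(contours[i]);
        if (a > bestArea) { bestArea = a; best = i; }
    }

    Moments m = moments(contours[best]);
    if (m.m00 == 0)
        return Point(-1, -1);
    return Point(cvRound(m.m10 / m.m00), cvRound(m.m01 / m.m00));
}

Calling it on frame2 or frame3 after the empty checks and drawing the returned point with circle() gives a rough pupil marker; an adaptive threshold or the eyeLike gradient method is more robust to lighting changes.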
2016-04-18 16:09:07 -0600 commented answer Assertion error “Vector subscript out of range” Line 1140

My main purpose is to create an eye tracking and gaze estimation program. If you have further suggestions, it would be a pleasure to hear them.

2016-04-18 15:53:21 -0600 received badge  Supporter (source)
2016-04-18 15:53:20 -0600 received badge  Supporter (source)
2016-04-18 15:53:20 -0600 received badge  Scholar (source)
2016-04-18 15:53:18 -0600 commented answer Assertion error “Vector subscript out of range” Line 1140

Thank you so much, it works perfectly!

2016-04-18 08:42:42 -0600 asked a question Assertion error “Vector subscript out of range” Line 1140

Hello guys, I am a beginner at OpenCV. What I am trying to do is crop the eye region from the main frame and focus on the pupil. But I get this error: "Assertion error: Vector subscript out of range, Line 1140". Do you have any idea how to fix that?

Here is my code:

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;

int main(int argc, const char** argv)
{
    CascadeClassifier eye;
    CascadeClassifier righteye;
    CascadeClassifier lefteye;
    eye.load("haarcascade_mcs_eyepair_small.xml");
    righteye.load("haarcascade_mcs_lefteye.xml");
    lefteye.load("haarcascade_mcs_righteye.xml");

    VideoCapture vid;
    vid.open(0);
    if (!vid.isOpened())
    {
        cout << "webcam could not be opened" << endl;
        system("Pause");
        return -1;
    }

    Mat frame;
    Mat grires;
    Mat frame2;
    Mat frame3;
    namedWindow("algilanan", WINDOW_AUTOSIZE);
    namedWindow("algilanan2", WINDOW_AUTOSIZE);
    namedWindow("algilanan3", WINDOW_AUTOSIZE);

    while (true)
    {
        vid >> frame;
        cvtColor(frame, grires, CV_BGR2GRAY);  // convert the image to grayscale
        //equalizeHist(grires, grires); // equalize the histogram if desired
        vector<Rect> eyepair;
        eye.detectMultiScale(grires, eyepair, 1.1, 3, 0, Size(30, 30));
        for (int i = 0; i < eyepair.size(); i++)
        {
            Point pt1(eyepair[i].x + eyepair[i].width, eyepair[i].y + eyepair[i].height);
            Point pt2(eyepair[i].x, eyepair[i].y);
            rectangle(frame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
            Mat ROI = grires(eyepair[i]);
            vector<Rect> reye;
            vector<Rect> leye;
            righteye.detectMultiScale(ROI, reye, 1.1, 3, 0, Size(30, 30));
            lefteye.detectMultiScale(ROI, leye, 1.1, 3, 0, Size(30, 30));
            for (int t = 0; t < reye.size(); t++)
            {
                Point pt1(eyepair[i].x + reye[t].x, eyepair[i].y + reye[t].y + reye[t].height);
                Point pt2(reye[t].x, reye[t].y);
                rectangle(frame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
                Rect r1 = reye[t];
                frame3 = ROI(r1);
            }
            for (int z = 0; z < leye.size(); z++)
            {
                Point pt1(eyepair[i].x + leye[z].x, eyepair[i].y + leye[z].y + leye[z].height);
                Point pt2(leye[z].x, leye[z].y);
                rectangle(frame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
                Rect r2 = reye[z];
                frame2 = ROI(r2);
            }
        }
        imshow("algilanan2", frame);
        imshow("algilanan1", frame2);
        waitKey(33);
    }
    return 0;
}
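For reference: one likely cause of the subscript error is visible in the second inner loop above, which iterates over leye but indexes reye with z (Rect r2 = reye[z];), so the assertion fires whenever leye has more detections than reye. Below is a minimal self-contained sketch of that failure pattern and the guarded indexing; the vector contents and ROI size are illustrative values only.

#include <opencv2/core/core.hpp>
#include <vector>
#include <iostream>

using namespace cv;

int main()
{
    // Illustration of the failure pattern: the detectors can return different
    // numbers of rectangles, so a loop over one vector must not index the other.
    std::vector<Rect> reye;                        // e.g. 0 right-eye detections
    std::vector<Rect> leye(2, Rect(5, 5, 20, 20)); // e.g. 2 left-eye detections

    Mat ROI = Mat::zeros(100, 100, CV_8UC1);       // stand-in for the eye-pair ROI
    Mat frame2;

    for (size_t z = 0; z < leye.size(); z++)
    {
        // Rect r2 = reye[z];   // out of range here: reye.size() == 0
        Rect r2 = leye[z];      // index the vector the loop actually iterates over
        frame2 = ROI(r2 & Rect(0, 0, ROI.cols, ROI.rows)); // clamp to the ROI to stay in bounds
    }

    std::cout << "cropped " << frame2.cols << "x" << frame2.rows << std::endl;
    return 0;
}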