Ask Your Question
0

convert detected eye region (ROI) to Y channel

asked 2015-10-22 06:39:42 -0600

sarmad gravatar image

In the following code to detect face and eye

I want to convert the detected eye region only to the Y channel of YCbCr.

Thanks for any help

Regards

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;


// Forward declaration: detects faces/eyes in one frame and displays it (defined below main).
void detectAndDisplay(Mat frame);

// Haar cascade model files, expected in the working directory, and the
// global classifiers they are loaded into at startup in main().
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
 CascadeClassifier face_cascade;
 CascadeClassifier eyes_cascade;

 int main(int argc, const char** argv)
 {
 VideoCapture capture("face.mp4");
 Mat frame;

//-- 1. Load the cascades
if (!face_cascade.load(face_cascade_name)){ printf("--(!)Error loading\n"); return -1; };
if (!eyes_cascade.load(eyes_cascade_name)){ printf("--(!)Error loading\n"); return -1; };

//-- 2. Read the video stream

if (!capture.isOpened())
{
    printf("error to initialize camera");
    return 1;
}
    while (true)
    {
        capture >> frame;

        //-- 3. Apply the classifier to the frame
        if (!frame.empty())
        {
            detectAndDisplay(frame);
        }
        else
        {
            printf(" --(!) No captured frame -- Break!"); break;
        }

        int c = waitKey(10);
        if ((char)c == 'c') { break; }
    }

return 0;
  }

    /** @function detectAndDisplay
     *  @brief Detects faces in @p frame, draws an ellipse around each one,
     *  detects eyes inside every face ROI and circles them, then shows the
     *  annotated frame.
     *  @param frame BGR input frame (Mat is a shallow copy, so the drawings
     *         share pixel data with the caller's frame).
     */
   void detectAndDisplay(Mat frame)
 {
// Window title. The original tutorial declares this as a global string,
// which was missing from this snippet -- `window_name` was an undefined
// identifier; a file-local constant fixes the compile error.
static const char* window_name = "Capture - Face detection";

std::vector<Rect> faces;
Mat frame_gray;

cvtColor(frame, frame_gray, CV_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);  // boost contrast to help the detector

//-- Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

for (size_t i = 0; i < faces.size(); i++)
{
    Point center(faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5);
    ellipse(frame, center, Size(faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);

    Mat faceROI = frame_gray(faces[i]);
    std::vector<Rect> eyes;

    //-- In each face, detect eyes. NOTE: the returned rectangles are
    //   relative to faceROI, not to the full frame.
    eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

    for (size_t j = 0; j < eyes.size(); j++)
    {
        // Translate the eye centre back into full-frame coordinates.
        Point center(faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5);
        int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
        circle(frame, center, radius, Scalar(255, 0, 0), 4, 8, 0);
    }
    }
   //-- Show what you got
imshow(window_name, frame);
}
edit retag flag offensive close merge delete

1 answer

Sort by » oldest newest most voted
3

answered 2015-10-22 08:03:05 -0600

You should use the region of interest, which is the location of the eyes, to cut out a part of the original image, which is still in BGR color and not grayscale, then you should apply some color space conversion to it.

The following snippet does that, just put it at the proper place in your code.

// When looping over the eyes
// The deep clone is needed to have seperate data
Mat eye_region = frame(eyes[j]).clone();
Mat eye_region_YCbCr;
cvtColor(eye_region, eye_region_YCbCr, CV_BGR2YCrCb);
vector<Mat> channels;
split(eye_region_YCbCr, channels);
Mat eye_region_Ychannel = channels[0].clone();
edit flag offensive delete link more

Comments

I put your code inside the for loop for the eyes. Is that right?

eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

    for (size_t j = 0; j < eyes.size(); j++)
    {
        // eyes[j] is relative to faceROI -- translate to full-frame
        // coordinates before cutting the colour ROI out of the original
        // frame, otherwise the wrong region is extracted.
        Rect eye_rect(faces[i].x + eyes[j].x, faces[i].y + eyes[j].y, eyes[j].width, eyes[j].height);
        Mat eye_region = frame(eye_rect).clone();  // deep copy: separate pixel data
        Mat eye_region_YCbCr;
        cvtColor(eye_region, eye_region_YCbCr, CV_BGR2YCrCb);
        vector<Mat> channels;
        split(eye_region_YCbCr, channels);
        Mat eye_region_Ychannel = channels[0].clone();  // channel 0 = luma (Y)

        Point center(faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5);
        int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
        circle(frame, center, radius, Scalar(255, 0, 0), 4, 8, 0);
    }
}
//-- Show what you got
imshow(window_name, frame);

}

how can I display the Y channel to see the result

sarmad gravatar imagesarmad ( 2015-10-23 02:34:40 -0600 )edit

Just like you visualised the frame? Just add imshow("Y channel", eye_region_Ychannel); waitKey(0); ... but seriously, why even attempt a C++ vision project if you do not know the basics yet. I suggest you take a look at the introductory OpenCV Tutorials.

StevenPuttemans gravatar imageStevenPuttemans ( 2015-10-23 03:54:51 -0600 )edit

I have no clue how you got that result ... I will fire up my Ubuntu this afternoon and have a look at it

StevenPuttemans gravatar imageStevenPuttemans ( 2015-10-23 04:59:23 -0600 )edit

as you see in this Image the eye and face detected , but the Y

channel is displayed as small window Y channel

sarmad gravatar imagesarmad ( 2015-10-23 05:08:13 -0600 )edit

It should be a small window, since it is only the eye region! But it should contain information!

StevenPuttemans gravatar imageStevenPuttemans ( 2015-10-23 07:09:29 -0600 )edit

So , what should I do to get the information ?

sarmad gravatar imagesarmad ( 2015-10-23 10:43:33 -0600 )edit

Question Tools

1 follower

Stats

Asked: 2015-10-22 06:39:42 -0600

Seen: 334 times

Last updated: Oct 22 '15