Faces detection with detectMultiScale(...)

I have this code:

void detectAndWrite( Mat frame )
{
  std::vector<Rect> faces;
  Mat frame_gray;

  cvtColor( frame, frame_gray, CV_BGR2GRAY );
  equalizeHist( frame_gray, frame_gray );

  //-- Detect faces
  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

  //-- Draw a rectangle around every detection, directly on the input frame
  for( size_t i = 0; i < faces.size(); i++ )
  {
    Point pt1(faces[i].x, faces[i].y);
    Point pt2(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
    cv::rectangle( frame, pt1, pt2, Scalar( 100, 100, 255 ) );
  }

  //-- Write the annotated frame to disk
  imwrite("newimage.jpg", frame);
}
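
To compare the two passes precisely, the rectangles returned by detectMultiScale could also be dumped to the console. A minimal helper sketch (printDetections is just a name made up for this question; it assumes the OpenCV 2.x core header):

#include "opencv2/core/core.hpp"
#include <cstdio>
#include <vector>

// Hypothetical helper: print every rectangle returned by detectMultiScale,
// e.g. call printDetections(faces) right after the detection step above.
static void printDetections( const std::vector<cv::Rect>& faces )
{
    printf( "found %d object(s)\n", (int)faces.size() );
    for( size_t i = 0; i < faces.size(); i++ )
        printf( "  #%d: x=%d y=%d w=%d h=%d\n", (int)i,
                faces[i].x, faces[i].y, faces[i].width, faces[i].height );
}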

I passed the same picture to this function twice. The first time it found one object, so the saved picture had one rectangle. The second time it found two objects, and the saved file contained three rectangles, even though there are only two objects in the picture. My guess was that some information about the picture gets stored in the cascade object (face_cascade) while it is being processed.

So I have two questions:

  1. Why does it find different object areas on different runs over the same image?
  2. Does the cascade remember a picture it has already worked on, and the objects in it? If so, how can I work with that? (A small test sketch for this follows right after the list.)
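
To make question 2 concrete, here is a minimal sketch of the check I have in mind (assuming OpenCV 2.x headers and the same cascade_02.xml / 123.jpg files as in the full code below). It runs detectMultiScale twice on a grayscale image that is never drawn on, so if the cascade itself kept any state between calls, the two passes should report different results:

#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <cstdio>
#include <vector>

using namespace cv;

int main()
{
    CascadeClassifier cascade;
    if( !cascade.load( "cascade_02.xml" ) ) { printf("--(!)Error loading\n"); return -1; }

    Mat color = imread( "123.jpg", 1 );
    Mat gray;
    cvtColor( color, gray, CV_BGR2GRAY );
    equalizeHist( gray, gray );

    for( int pass = 1; pass <= 2; pass++ )
    {
        std::vector<Rect> faces;
        // gray is never modified, so both passes see exactly the same pixels
        cascade.detectMultiScale( gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        printf( "pass %d: %d object(s)\n", pass, (int)faces.size() );
    }
    return 0;
}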

Image from the first pass, with one detected object: [image]

Image from the second pass, with two detected objects: [image]

The full code was like this:

 #include "opencv2/objdetect/objdetect.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
 #include <cstdio>

 using namespace cv;

 String face_cascade_name = "cascade_02.xml";
 CascadeClassifier face_cascade;

 void detectAndWrite( Mat frame );   // the function shown above

 int main( int argc, const char** argv )
 {
     if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }
     // Old C API call: the IplImage* is implicitly wrapped as a Mat when passed below
     IplImage* image = cvLoadImage("123.jpg", 1);
     int i = 2;
     while (i) {
         detectAndWrite(image);
         --i;
     }
     return 0;
 }
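
For comparison, here is a sketch of how I think the same driver looks when written purely with the C++ API (imread returning a cv::Mat directly, instead of cvLoadImage / IplImage*). This is only a sketch of the equivalent code, not what I actually ran:

 #include "opencv2/objdetect/objdetect.hpp"
 #include "opencv2/highgui/highgui.hpp"
 #include "opencv2/imgproc/imgproc.hpp"
 #include <cstdio>

 using namespace cv;

 String face_cascade_name = "cascade_02.xml";
 CascadeClassifier face_cascade;

 void detectAndWrite( Mat frame );   // same function as above

 int main( int argc, const char** argv )
 {
     if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }

     Mat image = imread( "123.jpg", 1 );
     if( image.empty() ){ printf("--(!)Error reading 123.jpg\n"); return -1; }

     for( int i = 0; i < 2; i++ )
         detectAndWrite( image );   // the Mat header is copied, but both calls share the same pixel data

     return 0;
 }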