Face Recognition in Video

asked 2014-09-27 16:29:39 -0600

Disa

updated 2014-09-27 16:30:46 -0600

Good day everybody.

For the last few days I have been trying to understand and run FaceRecognizer with the Eigenfaces or Fisherfaces algorithms. Every time I get the same result (either -1 or the first prediction in the list). I train on the AT&T dataset plus my own face (10 pictures) grabbed from the camera (detected with Viola-Jones). Still, the prediction comes out wrong.
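For reference, the recognizer is created and trained before the loop shown below. Here is a minimal sketch of that setup; the Fisherfaces choice and the helper signature are assumptions, not the exact code:

// Minimal training sketch (assumed setup, not the exact code). Key constraint:
// every training image and every query face must be grayscale and share
// exactly the same size (imageWidth x imageHeight).
#include <opencv2/contrib/contrib.hpp>   // cv::FaceRecognizer (OpenCV 2.4)
#include <opencv2/core/core.hpp>
#include <vector>

cv::Ptr<cv::FaceRecognizer> trainModel(const std::vector<cv::Mat>& images,
                                       const std::vector<int>& labels)
{
    // Fisherfaces; cv::createEigenFaceRecognizer() is a drop-in alternative.
    cv::Ptr<cv::FaceRecognizer> model = cv::createFisherFaceRecognizer();
    model->train(images, labels);        // images: CV_8UC1, identical sizes
    return model;
}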

(I have also tried playing with the threshold, but the result is the same.) Here is my code:

for (;;) {
    // Grab a frame, downscale it for speed, and keep a colour copy for drawing.
    cap >> frame;
    cv::resize(frame, frame, cv::Size(426, 320), 0, 0, cv::INTER_CUBIC);
    cv::Mat original = frame.clone();

    // Detection and recognition both work on the grayscale image.
    cv::Mat gray;
    cv::cvtColor(original, gray, CV_BGR2GRAY);

    // Detect faces and eyes on the grayscale frame.
    std::vector<cv::Rect_<int> > faces;
    haarCascade.detectMultiScale(gray, faces);

    std::vector<cv::Rect_<int> > eyes;
    eyeCascade.detectMultiScale(gray, eyes);

    // Declared outside the face loop so the 'g' key below can save the most
    // recent crop.
    cv::Mat faceResized;

    for (std::vector<cv::Rect_<int> >::iterator iteratorFace = faces.begin(); iteratorFace != faces.end(); ++iteratorFace) {
      cv::Rect faceRect = *iteratorFace;
      cv::Mat face = gray(faceRect);   // grayscale crop of the detected face

      // Require one eye in each of the two upper quadrants of the face rect
      // before trusting the detection.
      bool eyeInFirstQuater  = false;
      bool eyeInSecondQuater = false;
      cv::Rect leftEye;
      cv::Rect rightEye;

      for (std::vector<cv::Rect_<int> >::iterator iteratorEye = eyes.begin(); iteratorEye != eyes.end(); ++iteratorEye)
      {
        cv::Rect eyeRect = *iteratorEye;
        cv::Point2i eyeCenter = cv::Point2i(eyeRect.x + eyeRect.width / 2, eyeRect.y + eyeRect.height / 2);  // eye center

        if (!eyeInFirstQuater)
        {
          if (quarterOfPoint(faceRect, eyeCenter) == 1)
          {
            eyeInFirstQuater = true;
            leftEye = eyeRect;
          }
        }
        if (!eyeInSecondQuater)
        {
          if (quarterOfPoint(faceRect, eyeCenter) == 2)
          {
            eyeInSecondQuater = true;
            rightEye = eyeRect;
          }
        }
        // Both eyes found: draw them and stop scanning the remaining eyes.
        if (eyeInFirstQuater && eyeInSecondQuater)
        {
          rectangle(original, leftEye,  CV_RGB(196, 0, 0), 1);
          rectangle(original, rightEye, CV_RGB(196, 0, 0), 1);
          break;
        }
      }
      // Only run recognition when both eyes were located.
      if (eyeInFirstQuater && eyeInSecondQuater)
      {
        // The recognizer was trained on imageWidth x imageHeight images, so
        // the crop must be resized to exactly that size before predict().
        cv::resize(face,
          faceResized,
          cv::Size(imageWidth, imageHeight),
          1.0f, 1.0f,
          cv::INTER_CUBIC);

        int predictedLabel = model->predict(faceResized);

        rectangle(original, faceRect, CV_RGB(0, 196, 0), 1);

        std::string box_text = cv::format("%d", predictedLabel);

        int pos_x = std::max(faceRect.tl().x, 0);
        int pos_y = std::max(faceRect.tl().y, 0);

        putText(original,
          box_text,
          cv::Point(pos_x, pos_y),
          cv::FONT_HERSHEY_PLAIN,
          1.0f,
          CV_RGB(0, 255, 0),
          1);
      }
    }

    // Upscale only for display.
    cv::resize(original, original, cv::Size(640, 480), 0, 0, cv::INTER_CUBIC);
    imshow("face_recognizer", original);
    char key = (char) cv::waitKey(5);

    if (key == 27) {   // ESC quits
      break;
    }

    // 'g' saves the most recent resized face crop to disk, e.g. to extend the
    // training set.
    if (key == 'g') {
      if (!faceResized.empty()) {   // only if a face with two eyes was seen
        std::string fileName(std::to_string(fileIndex));
        std::string fileExtension(".jpg");
        std::string imagePath = fileName + fileExtension;

        imwrite(imagePath, faceResized);
        fileIndex++;
      }
    }
  }
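
One thing worth checking: FaceRecognizer::predict() also has an overload that returns the distance to the best match, and the threshold set on the model turns any distance above it into -1. A small sketch of how those distances could be inspected (it reuses the same model and faceResized as in the loop above):

// Sketch: print the raw distance behind each prediction. With the default
// threshold (DBL_MAX) predict() never returns -1; a very low threshold maps
// almost every face to -1, so the raw distances show what a sensible
// threshold would be.
#include <iostream>
#include <opencv2/contrib/contrib.hpp>   // cv::FaceRecognizer (OpenCV 2.4)

void reportPrediction(const cv::Ptr<cv::FaceRecognizer>& model,
                      const cv::Mat& faceResized)
{
    int label = -1;
    double distance = 0.0;               // "confidence" is really a distance
    model->predict(faceResized, label, distance);
    std::cout << "label " << label << ", distance " << distance << std::endl;
}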

Comments

Start by adding samples of the original data set and samples of the data that you added yourself. I am guessing the two sets are somewhat incompatible with each other.
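
For example, a quick check along these lines shows whether the grabbed pictures match the AT&T geometry (a sketch; the paths are placeholders):

// Sanity-check sketch (placeholder paths): the sets are only compatible if
// every image is single-channel and has the same size. AT&T images are
// 92x112 grayscale.
#include <iostream>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::Mat att  = cv::imread("att/s1/1.pgm", CV_LOAD_IMAGE_GRAYSCALE);
    cv::Mat mine = cv::imread("0.jpg",        CV_LOAD_IMAGE_GRAYSCALE);

    std::cout << "AT&T sample: " << att.cols  << "x" << att.rows  << std::endl;
    std::cout << "own sample : " << mine.cols << "x" << mine.rows << std::endl;
    // If the sizes differ, resize the grabbed faces to one common size
    // (and ideally align them by the eyes) before training.
    return 0;
}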

StevenPuttemans ( 2014-09-29 06:30:41 -0600 )