FisherFaceRecognizer - prediction result always 1

Hi guys, I'm trying to replicate what is posted at http://docs.opencv.org/trunk/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.html (it's face recognition from video, using the FisherFace algorithm).

I've changed the quoted code only to retrieve the vector&lt;Mat&gt; images and vector&lt;int&gt; labels from a database. They are input parameters to my function, and I've verified that the right .bmp images are opened. What is happening is that I always get the result 1 for "prediction", even if I use different photos with different expressions (of my brother too).

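For reference, this is roughly the sanity check I would drop in right after the loading loop and before model->train() (a minimal sketch, reusing the vector&lt;Mat&gt; images / vector&lt;int&gt; labels from the code below; it assumes &lt;set&gt; is included). Fisherfaces needs at least two distinct labels, and every training image has to have the same size as the faces you later predict on:

    // Quick checks on the training data (sketch only):
    std::set<int> distinct(labels.begin(), labels.end());
    qDebug() << "images:" << (int)images.size()
             << "labels:" << (int)labels.size()
             << "distinct labels:" << (int)distinct.size(); // Fisherfaces needs >= 2
    for (size_t k = 0; k < images.size(); k++) {
        if (images[k].empty())
            qDebug() << "image" << (int)k << "could not be read";
        else if (images[k].size() != images[0].size() || images[k].channels() != 1)
            qDebug() << "image" << (int)k << "has a different size or is not grayscale";
    }
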
I know it may seem vague, and for that reason I attach the code, but do you have any suggestion about what the problem could be? Thanks, A

#include "F_imageprocessing.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include "opencv/cv.h"
#include "opencv/cvaux.h"
#include "main_derp.h"

#include "opencv2/contrib/contrib.hpp"
//http://docs.opencv.org/trunk/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.html
imageprocessing::imageprocessing(QObject *parent) :
    QObject(parent)
{
}

int imageprocessing::imagecheck(QVector<QString> img_paths, QVector<int> img_ID)
{    
    int prediction = 0;
    //string fn_haar = (QDir::currentPath()+"/haarcascade_frontalface_default.xml").toStdString();
    string fn_haar = "/opt/local/share/OpenCV/haarcascades/haarcascade_frontalface_default.xml";
    vector<Mat> images;
    vector<int> labels;
    for (int j=0; j<img_paths.count(); j++)
    {
        if(!img_paths[j].toStdString().empty())
        {
            QImage q_image;
            q_image.load(QString(img_paths[j]));
            images.push_back(imread(img_paths[j].toStdString().c_str(), 0));
            labels.push_back(img_ID[j]);
        }else return -1;
    }
    int im_width = images[0].cols;
    int im_height = images[0].rows;

    // Get a handle to the Video device:
    VideoCapture cap(0);
    cap.isOpened();
    Mat frame;

    //start of Algorithm (fisherface)
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
    //Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);
    CascadeClassifier haar_cascade;
    haar_cascade.load(fn_haar);
    for(int zz=0;zz<3;zz++) {
    //for(;;) {
        cap >> frame;
        // Clone the current frame:
        Mat original = frame.clone();
        // Convert the current frame to grayscale:
        Mat gray;
        cvtColor(original, gray, CV_BGR2GRAY);
        // Find the faces in the frame:
        vector< Rect_<int> > faces;
        haar_cascade.detectMultiScale(gray, faces);
        for(int i = 0; i < faces.size(); i++) {
            Rect face_i = faces[i];
            Mat face = gray(face_i);
            Mat face_resized;
            cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
            prediction = model->predict(face_resized);
            qDebug()<<prediction;
            /*create rectangle around detected image
            rectangle(original, face_i, CV_RGB(0, 255,0), 1);
            string box_text = format("Prediction = %d", prediction);
            // Calculate the position for annotated text and put the rectangle into image
            int pos_x = std::max(face_i.tl().x - 10, 0);
            int pos_y = std::max(face_i.tl().y - 10, 0);
            putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);*/
        }
        /* Show the result:
        imshow("face_recognizer", original);
        char key = (char) waitKey(20); */
        // Exit this loop on escape:
    }
    qDebug()<<prediction;
    return prediction;
}
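
One more detail that might matter for debugging: the contrib FaceRecognizer::predict also has an overload that returns the distance/confidence together with the label, which makes it easier to tell whether the constant result 1 is a close match or just the nearest class. A minimal sketch of how it would replace the predict call inside the loop above:

    int label = -1;
    double confidence = 0.0;
    // Overload that also reports the distance of the match:
    model->predict(face_resized, label, confidence);
    qDebug() << "label:" << label << "confidence:" << confidence;
    prediction = label;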