1 | initial version |
I actually tried with your answer. But the name of the person is not getting displayed in the video. I am now trying to display names of person in a video. For that I am using the code from the following link http://docs.opencv.org/trunk/modules/contrib/doc/facerec/tutorial/facerec_video_recognition.html
and I tried your answer to display the names of the people in the video, but the names still do not appear.
Below is the code
// Compile with (the backticks were lost when pasting):
//   g++ -ggdb `pkg-config --cflags opencv` -o `basename online_faceRec_video.cpp .cpp` online_faceRec_video.cpp `pkg-config --libs opencv`
/* * Copyright (c) 2011. Philipp Wagner <bytefish[at]gmx[dot]de>. * Released to public domain under terms of the BSD Simplified license. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the organization nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * See http://www.opensource.org/licenses/bsd-license */
using namespace cv; //using namespace cv::face; using namespace std;
static void read_csv(const string& filename, vector<mat>& images, vector<int>& labels, vector<string>& names, char separator = ';') { std::ifstream file(filename.c_str(), ifstream::in); if (!file) { string error_message = "No valid input file was given, please check the given filename."; CV_Error(CV_StsBadArg, error_message); } string line, path, classlabel,classnames; while (getline(file, line)) { stringstream liness(line); getline(liness, path, separator); getline(liness, classlabel); getline(liness, classnames); if(!path.empty() && !classlabel.empty()) { images.push_back(imread(path, 0)); labels.push_back(atoi(classlabel.c_str())); names.push_back(classnames.c_str()); } } } int main(int argc, const char *argv[]) { // Check for valid command line arguments, print usage // if no arguments were given. if (argc != 4) { cout << "usage: " << argv[0] << " </path> </path> </path>" << endl; cout << "\t </path> -- Path to the Haar Cascade for face detection." << endl; cout << "\t </path> -- Path to the CSV file with the face database." << endl; cout << "\t <device id=""> -- The webcam device id to grab frames from." << endl; exit(1); } // Get the path to your CSV: string fn_haar = string(argv[1]); string fn_csv = string(argv[2]); int deviceId = atoi(argv[3]); // These vectors hold the images and corresponding labels: vector<mat> images; vector<int> labels; vector<string> names;
try {
read_csv(fn_csv, images, labels,names);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
exit(1);
}
int im_width = images[0].cols;
int im_height = images[0].rows;
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(images, labels);
CascadeClassifier haar_cascade;
haar_cascade.load(fn_haar);
VideoCapture cap(deviceId);
if(!cap.isOpened()) {
cerr << "Capture Device ID " << deviceId << "cannot be opened." << endl;
return -1;
}
Mat frame;
for(;;) {
cap >> frame;
Mat original = frame.clone();
Mat gray;
cvtColor(original, gray, CV_BGR2GRAY);
vector< Rect_<int> > faces;
haar_cascade.detectMultiScale(gray, faces);
for(int i = 0; i < faces.size(); i++) {
Rect face_i = faces[i];
Mat face = gray(face_i);
Mat face_resized;
cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
int prediction = model->predict(face_resized);
cout<<names[prediction]<<endl;
rectangle(original, face_i, CV_RGB(0, 255,0), 1);
string box_text = format("Prediction = %d", prediction);
cout<<box_text<<endl;
int pos_x = std::max(face_i.tl().x - 10, 0);
int pos_y = std::max(face_i.tl().y - 10, 0);
putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
}
imshow("face_recognizer", original);
char key = (char) waitKey(20);
if(key == 27)
break;
}
return 0;
}
Please let me know what I have to modify. Below is the text file I used for training. MyTraining/s2/1.pgm;2;Naveen MyTraining/s2/2.pgm;2;Naveen MyTraining/s2/3.pgm;2;Naveen MyTraining/s2/4.pgm;2;Naveen MyTraining/s2/5.pgm;2;Naveen MyTraining/s2/6.pgm;2;Naveen MyTraining/s2/7.pgm;2;Naveen MyTraining/s2/8.pgm;2;Naveen MyTraining/s2/9.pgm;2;Naveen MyTraining/s2/10.pgm;2;Naveen MyTraining/s3/1.pgm;3;Sunil MyTraining/s3/2.pgm;3;Sunil MyTraining/s3/3.pgm;3;Sunil MyTraining/s3/4.pgm;3;Sunil MyTraining/s3/5.pgm;3;Sunil MyTraining/s3/6.pgm;3;Sunil MyTraining/s3/7.pgm;3;Sunil MyTraining/s3/8.pgm;3;Sunil MyTraining/s3/9.pgm;3;Sunil MyTraining/s3/10.pgm;3;Sunil MyTraining/s4/1.pgm;4;Gopika MyTraining/s4/2.pgm;4;Gopika MyTraining/s4/3.pgm;4;Gopika MyTraining/s4/4.pgm;4;Gopika MyTraining/s4/5.pgm;4;Gopika MyTraining/s4/6.pgm;4;Gopika MyTraining/s4/7.pgm;4;Gopika MyTraining/s4/8.pgm;4;Gopika MyTraining/s4/9.pgm;4;Gopika MyTraining/s4/10.pgm;4;Gopika MyTraining/s1/1.pgm;1;Sowmya MyTraining/s1/2.pgm;1;Sowmya MyTraining/s1/3.pgm;1;Sowmya MyTraining/s1/4.pgm;1;Sowmya MyTraining/s1/5.pgm;1;Sowmya MyTraining/s1/6.pgm;1;Sowmya MyTraining/s1/7.pgm;1;Sowmya MyTraining/s1/8.pgm;1;Sowmya MyTraining/s1/9.pgm;1;Sowmya MyTraining/s1/10.pgm;1;Sowmya