How to train on an image database once, then save the result and retrieve predictions faster?
I have just started with OpenCV. My motivation is object detection, and I am using the Bag of Words algorithm: http://docs.opencv.org/trunk/modules/...
I am using the Caltech database (Partially Annotated Databases, The Caltech Database): http://pascallin.ecs.soton.ac.uk/chal...
I have attached my working code. So far the recognition is based on a Mat that is built by looping over all the positive dataset images, so every time I introduce a new test image from outside the training dataset, the code builds a new Mat, calls bowTrainer.add(features), and computes a new vocabulary.
I want to remove this repeated training step, so that when I give it a new image it is checked against the "final trained matrix" and returns a result quickly.
Also, how can I implement it in such a way that, after predicting the class of an input image, the system is trained on that image afterwards?
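To make the question concrete, what I imagine is training once, saving the vocabulary and the classifier to disk, and only loading them on later runs. Something along these lines is what I have in mind (just a sketch using the dictionary, bowDE and a trained CvSVM named svm from the code below; the file names vocabulary.yml and svm_classifier.xml are placeholders), though I am not sure this is the right approach:

// --- after training once ---
FileStorage fs("vocabulary.yml", FileStorage::WRITE);
fs << "vocabulary" << dictionary;        // dictionary is the Mat returned by bowTrainer.cluster()
fs.release();
svm.save("svm_classifier.xml");          // svm is the trained CvSVM

// --- on later runs ---
Mat dictionary;
FileStorage fs2("vocabulary.yml", FileStorage::READ);
fs2["vocabulary"] >> dictionary;
fs2.release();
bowDE.setVocabulary(dictionary);         // skip bowTrainer.cluster()
CvSVM svm;
svm.load("svm_classifier.xml");          // skip svm.train()

For the second part, I guess I would have to append the new image's BOW descriptor and label to trainingData/labels and retrain from time to time, since as far as I know CvSVM has no incremental update, but I am not sure.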
#include "stdafx.h"
#include "opencv\cv.h"
#include "opencv\highgui.h"
#include "opencv\ml.h"
#include <stdio.h>
#include <iostream>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <vector>
using namespace cv;
using namespace std;
char ch[30]; // buffer used to build the image file paths
//--------Using SURF as feature extractor and FlannBased for assigning a new point to the nearest one in the dictionary
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor();
SurfFeatureDetector detector(500);
//---dictionary size=number of cluster's centroids
int dictionarySize = 1500;
TermCriteria tc(CV_TERMCRIT_ITER, 10, 0.001);
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
BOWImgDescriptorExtractor bowDE(extractor, matcher);
// Collects the descriptors used to build the BOW vocabulary: loops over the
// training images, detects SURF keypoints, computes their descriptors and
// feeds them to the k-means BOW trainer.
void collectclasscentroids() {
    IplImage *img;
    int i, j;
    for (j = 1; j <= 4; j++) {
        for (i = 1; i <= 60; i++) {
            sprintf(ch, "%s%d%s%d%s", "train/", j, " (", i, ").jpg");
            const char* imageName = ch;
            img = cvLoadImage(imageName, 0);   // load as grayscale
            if (!img) continue;                // skip missing files
            vector<KeyPoint> keypoint;
            detector.detect(img, keypoint);
            Mat features;
            extractor->compute(img, keypoint, features);
            bowTrainer.add(features);
            cvReleaseImage(&img);              // avoid leaking the IplImage
        }
    }
    return;
}
int _tmain(int argc, _TCHAR* argv[]) {
    int i, j;
    IplImage *img2;
    cout << "Vector quantization..." << endl;
    collectclasscentroids();
    vector<Mat> descriptors = bowTrainer.getDescriptors();
    int count = 0;
    for (vector<Mat>::iterator iter = descriptors.begin(); iter != descriptors.end(); iter++) {
        count += iter->rows;
    }
    cout << "Clustering " << count << " features" << endl;
    // choosing the cluster centroids as the dictionary's words
    Mat dictionary = bowTrainer.cluster();
    bowDE.setVocabulary(dictionary);
    cout << "extracting histograms in the form of BOW for each image" << endl;
    Mat labels(0, 1, CV_32FC1);
    Mat trainingData(0, dictionarySize, CV_32FC1);
    int k = 0;
    vector<KeyPoint> keypoint1;
    Mat bowDescriptor1;
    // extracting a BOW histogram for each training image
    for (j = 1; j <= 4; j++) {
        for (i = 1; i <= 60; i++) {
            sprintf(ch, "%s%d%s%d%s", "train/", j, " (", i, ").jpg");
            const char* imageName = ch;
            img2 = cvLoadImage(imageName, 0);
            if (!img2) continue;
            detector.detect(img2, keypoint1);
            bowDE.compute(img2, keypoint1, bowDescriptor1);
            trainingData.push_back(bowDescriptor1);
            // setting the label (class index) for this image
            labels.push_back((float) j);
            cvReleaseImage(&img2);
        }
    }
    // Setting up the SVM parameters
    CvSVMParams params;
    params.kernel_type = CvSVM::RBF;
    params.svm_type = CvSVM::C_SVC;
    params ...
Here is some code similar to what you want to do. The idea is to keep two Mats, one of ground-truth labels and one of predicted labels, and then compare them with countNonZero.
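Below is a sketch of that comparison (the eval/ folder, the loop bounds and the svm object are placeholders; detector, bowDE and ch come from the code above):

// ground-truth vs. predicted labels for an evaluation set
Mat groundTruth(0, 1, CV_32FC1);
Mat results(0, 1, CV_32FC1);
for (int j = 1; j <= 4; j++) {
    for (int i = 1; i <= 60; i++) {
        sprintf(ch, "%s%d%s%d%s", "eval/", j, " (", i, ").jpg");   // hypothetical evaluation folder
        IplImage* img = cvLoadImage(ch, 0);
        if (!img) continue;
        vector<KeyPoint> keypoints;
        Mat bowDescriptor;
        detector.detect(img, keypoints);
        bowDE.compute(img, keypoints, bowDescriptor);
        float predicted = svm.predict(bowDescriptor);   // svm is the trained CvSVM
        groundTruth.push_back((float) j);
        results.push_back(predicted);
        cvReleaseImage(&img);
    }
}
// rows where the two Mats differ are non-zero after subtraction
double errorRate = (double) countNonZero(groundTruth - results) / groundTruth.rows;
cout << "Error rate: " << errorRate << endl;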