OpenCV Error When Training ANN_MLP
I want to train a neural network to classify two types of images, but when I train the network this error occurs:
OpenCV Error: Bad argument (output training data should be a floating-point matrix with the number of rows equal to the number of training samples and the number of columns equal to the size of last (output) layer) in cv::ml::ANN_MLPImpl::prepare_to_train, file C:\buildslave64\win64_amdocl\master_PackSlave-win64-vc14-shared\opencv\modules\ml\src\ann_mlp.cpp, line 675
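The error message itself states the requirement: the responses passed to train() must be a floating-point (CV_32F) matrix with one row per training sample and one column per neuron of the last (output) layer. Below is a minimal sketch of building such a matrix for a two-output network, assuming SIGMOID_SYM targets of -1/+1 (the buildResponses helper and the classIds vector are illustrative, not part of the original code):

#include <vector>
#include "opencv2\core.hpp"
#include "opencv2\ml.hpp"

// Hypothetical helper: build the responses matrix ANN_MLP::train() expects --
// CV_32F, one row per sample, one column per output neuron (2 here).
// classIds holds 0 or 1 for each training sample.
cv::Mat buildResponses(const std::vector<int>& classIds)
{
    cv::Mat responses((int)classIds.size(), 2, CV_32F, cv::Scalar(-1.f)); // -1 everywhere
    for (int i = 0; i < (int)classIds.size(); i++)
        responses.at<float>(i, classIds[i]) = 1.f;  // +1 on the neuron of the true class
    return responses;
}
// usage: annClassifier->train(Data, cv::ml::ROW_SAMPLE, buildResponses(classIds));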
My code:
#include "opencv2\core.hpp"#include "opencv2\imgproc.hpp"#include "opencv2\imgcodecs.hpp"#include "opencv2\highgui.hpp"#include "opencv2\ml.hpp"#include <string>#include "lbp.h" using namespace cv; using namespace cv::ml; void LoadTrainingData();
Mat Data;
Mat Lables;
//const int numberOfClass1 = 2384;
//const int numberOfClass2 = 2462;
const int numberOfClass1 = 23;
const int numberOfClass2 = 24;
int Class1 = 1;
int Class2 = -1;
const int imageDimention = 22;
std::string NumberToString(size_t Number)
{
    std::stringstream ss;
    ss << Number;
    return ss.str();
}
void main()
{
    LoadTrainingData();

    Ptr<ANN_MLP> annClassifier;
    annClassifier = ANN_MLP::create();
    annClassifier->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);

    Mat layers(1, 3, CV_32F);
    layers.at<float>(0) = Data.cols;
    layers.at<float>(1) = 100;
    layers.at<float>(2) = 2;
    annClassifier->setLayerSizes(layers);
    annClassifier->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
    annClassifier->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    bool trained = annClassifier->train(Data, ROW_SAMPLE, Lables);
    if (trained)
        annClassifier->save("Ann_sigmoid_eye");
}
void LoadTrainingData()
{
    Data = Mat(numberOfClass1 + numberOfClass2, imageDimention * imageDimention, CV_32FC1);
    Lables = Mat(numberOfClass1 + numberOfClass2, 1, CV_32SC1);

    // load openEye
    Mat img;
    Mat lbpImg;
    Mat row;
    std::string path;
    for (size_t i = 1; i <= numberOfClass2; i++)
    {
        path = "class1 (" + NumberToString(i) + ").jpg";
        img = imread(path);
        if (img.channels() > 1)
            cvtColor(img, img, CV_BGR2GRAY);
        lbp::ELBP(img, lbpImg, 1, 16);
        row = lbpImg.reshape(0, 1);
        row.convertTo(row, CV_32FC1);
        Data.push_back(row);
        Lables.push_back(Class1);
    }
    for (size_t i = 1; i <= numberOfClass1; i++)
    {
        path = "class2 (" + NumberToString(i) + ").jpg";
        img = imread(path);
        if (img.channels() > 1)
            cvtColor(img, img, CV_BGR2GRAY);
        lbp::ELBP(img, lbpImg, 1, 16);
        row = lbpImg.reshape(0, 1);
        row.convertTo(row, CV_32FC1);
        Data.push_back(row);
        Lables.push_back(Class2);
    }
}
I don't know why this happens. Please help me, thanks.
Looking back at this: simply converting the raw LBP image to float will make a horribly bad feature.
You are expected to either use patched (per-cell) histograms of the LBP codes, as in face recognition (LBPH), or some embedding that works better with L2 distance / SGD.
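One way to turn that advice into code: instead of flattening the raw LBP image into a row, build per-cell histograms of the LBP codes and concatenate them, the same idea LBPH face recognition uses. The sketch below is illustrative, not from the post; it assumes the LBP image holds 8-bit codes (CV_8UC1) and uses an arbitrary 4x4 grid with 256 bins, so the bin count would have to grow for ELBP with 16 sampling points:

#include "opencv2\core.hpp"

// Sketch: concatenated, normalized per-cell histograms of an 8-bit LBP image,
// returned as a single CV_32F row that can be push_back'ed into Data.
cv::Mat patchedLbpHistogram(const cv::Mat& lbpImg, int gridX = 4, int gridY = 4)
{
    const int bins = 256;
    cv::Mat feature(1, gridX * gridY * bins, CV_32F, cv::Scalar(0.f));
    int cellW = lbpImg.cols / gridX;
    int cellH = lbpImg.rows / gridY;
    for (int gy = 0; gy < gridY; gy++)
        for (int gx = 0; gx < gridX; gx++)
        {
            cv::Mat cell = lbpImg(cv::Rect(gx * cellW, gy * cellH, cellW, cellH));
            float* hist = feature.ptr<float>(0) + (gy * gridX + gx) * bins;
            for (int y = 0; y < cell.rows; y++)
                for (int x = 0; x < cell.cols; x++)
                    hist[cell.at<uchar>(y, x)] += 1.f;      // count each LBP code
            for (int b = 0; b < bins; b++)
                hist[b] /= (float)(cellW * cellH);          // normalize per cell
        }
    return feature;
}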