CvANN_MLP gives the same result for every sample

I have created a neural network with the CvANN_MLP class, using the OpenCV libraries, version 2.4.6. My CvANN_MLP network is set up as follows:

Mat trainingData(NUMERO_ESEMPI_TOTALE, 59, CV_32FC1);
Mat trainingClasses(NUMERO_ESEMPI_TOTALE, 1, CV_32FC1);

for(int i=0;i<NUMERO_ESEMPI_TOTALE;i++){
    for(int j=0;j<59;j++){
        trainingData.at<float>(i,j) = featureVect[i][j];
    }
}

for(int i=0;i<NUMERO_ESEMPI_TOTALE;i++){
    trainingClasses.at<float>(i,0) = featureVect[i][59];
}

Mat testData(NUMERO_ESEMPI_TEST, 59, CV_32FC1);
Mat testClasses(NUMERO_ESEMPI_TEST, 1, CV_32FC1);
for(int i=0;i<NUMERO_ESEMPI_TEST;i++){
    for(int j=0;j<59;j++){
        testData.at<float>(i,j) = featureVectTest[i][j];
    }
}

// 0 = mouth, 1 = not mouth.
testClasses.at<float>(0,0) = 1;
testClasses.at<float>(1,0) = 0;
testClasses.at<float>(2,0) = 1;
testClasses.at<float>(3,0) = 1;
testClasses.at<float>(4,0) = 0;
testClasses.at<float>(5,0) = 1;
testClasses.at<float>(6,0) = 0;
testClasses.at<float>(7,0) = 1;
testClasses.at<float>(8,0) = 1;
testClasses.at<float>(9,0) = 0;
testClasses.at<float>(10,0) = 0;
testClasses.at<float>(11,0) = 1;
testClasses.at<float>(12,0) = 0;
testClasses.at<float>(13,0) = 0;
testClasses.at<float>(14,0) = 0;
testClasses.at<float>(15,0) = 0;
testClasses.at<float>(16,0) = 0;
testClasses.at<float>(17,0) = 0;
testClasses.at<float>(18,0) = 0;
testClasses.at<float>(19,0) = 1;
testClasses.at<float>(20,0) = 1;
testClasses.at<float>(21,0) = 0;
testClasses.at<float>(22,0) = 1;
testClasses.at<float>(23,0) = 0;
testClasses.at<float>(24,0) = 1;
testClasses.at<float>(25,0) = 0;
testClasses.at<float>(26,0) = 0;
testClasses.at<float>(27,0) = 1;
testClasses.at<float>(28,0) = 1;
testClasses.at<float>(29,0) = 1;    

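// Network topology: 59 inputs, one hidden layer with 3 neurons, 1 output.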
Mat layers = Mat(3, 1, CV_32SC1);
layers.row(0) = Scalar(59);
layers.row(1) = Scalar(3);
layers.row(2) = Scalar(1);

CvANN_MLP mlp;
CvANN_MLP_TrainParams params;
CvTermCriteria criteria;
criteria.max_iter = 100;
criteria.epsilon = 0.0000001;
criteria.type = CV_TERMCRIT_ITER | CV_TERMCRIT_EPS;
params.train_method = CvANN_MLP_TrainParams::BACKPROP;
params.bp_dw_scale = 0.05;
params.bp_moment_scale = 0.05;
params.term_crit = criteria;
// create() with only the layer sizes keeps the default activation function (SIGMOID_SYM).
mlp.create(layers);
// train on the full training set
mlp.train(trainingData, trainingClasses, Mat(), Mat(), params);

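// Run the trained network on each test sample and log the raw output value.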
Mat predicted(testClasses.rows, 1, CV_32F);
Mat pred(NUMERO_ESEMPI_TEST, 1, CV_32FC1);
Mat pred1(NUMERO_ESEMPI_TEST, 1, CV_32FC1);
for(int i = 0; i < testData.rows; i++){
    Mat response(1, 1, CV_32FC1);
    Mat sample = testData.row(i);
    mlp.predict(sample, response);
    predicted.at<float>(i,0) = response.at<float>(0,0);
    pred.at<float>(i,0) = predicted.at<float>(i,0);
    pred1.at<float>(i,0) = predicted.at<float>(i,0);
    file<<"Value Image "<<i<<": "<<predicted.at<float>(i,0)<<"\n";
    //cout<<"Value Image "<<i<<": "<<predicted.at<float>(i,0)<<endl;
}

The problem is that this network returns the same result for every test sample, and I don't know why. My network takes feature vectors of 59 values as input and produces 1 output value. Could you help me?
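
In case it helps, here is a stripped-down, self-contained version of the same create/train/predict sequence on placeholder data (random features and made-up 4-3-1 layer sizes instead of my real 59-3-1 network, just to show how I am calling the API):

#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>
#include <iostream>

int main()
{
    // Placeholder data: 10 samples with 4 random features each and a 0/1 target.
    cv::Mat inputs(10, 4, CV_32FC1);
    cv::Mat targets(10, 1, CV_32FC1);
    cv::randu(inputs, cv::Scalar(0), cv::Scalar(1));
    for(int i = 0; i < targets.rows; i++)
        targets.at<float>(i,0) = (i % 2 == 0) ? 1.0f : 0.0f;

    // Same style of topology as above: inputs -> small hidden layer -> 1 output.
    cv::Mat layers = (cv::Mat_<int>(3,1) << 4, 3, 1);

    CvANN_MLP mlp;
    CvANN_MLP_TrainParams params;
    params.train_method = CvANN_MLP_TrainParams::BACKPROP;
    params.bp_dw_scale = 0.05;
    params.bp_moment_scale = 0.05;
    params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 100, 0.0000001);

    mlp.create(layers);
    mlp.train(inputs, targets, cv::Mat(), cv::Mat(), params);

    // Predict every sample and print the raw network output.
    for(int i = 0; i < inputs.rows; i++)
    {
        cv::Mat response(1, 1, CV_32FC1);
        mlp.predict(inputs.row(i), response);
        std::cout << "sample " << i << ": " << response.at<float>(0,0) << std::endl;
    }
    return 0;
}

This is only meant to illustrate the call sequence; my real code uses the 59-feature vectors shown above.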

Thanks in advance.
