Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

ANN_MLP output value error

When I try to train the network to output values to be 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1.0, it fails. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;


// Blend uniform noise in [0, 1] into every element of a CV_32FC1 matrix.
// Each value becomes (value + noise*scale) / (1 + scale) and is then
// clamped to [0, 1]. Uses rand(), so output depends on the global RNG state.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(row, col) + noise*scale) / (1.0f + scale);

            if (blended < 0)
                blended = 0;
            else if (blended > 1)
                blended = 1;

            mat.at<float>(row, col) = blended;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    Mat output_training_data = Mat(1, 2, CV_32F).clone();

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    mlp->setBackpropMomentumScale(0.1);

    Mat layersSize = Mat(3, 1, CV_16U);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);

    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);

    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    for (int i = 0; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();

        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network to output values to be 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1.0, it fails. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;


// Blend uniform noise in [0, 1] into every element of a CV_32FC1 matrix.
// Each value becomes (value + noise*scale) / (1 + scale) and is then
// clamped to [0, 1]. Uses rand(), so output depends on the global RNG state.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(row, col) + noise*scale) / (1.0f + scale);

            if (blended < 0)
                blended = 0;
            else if (blended > 1)
                blended = 1;

            mat.at<float>(row, col) = blended;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    Mat output_training_data = Mat(1, 2, CV_32F).clone();

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    mlp->setBackpropMomentumScale(0.1);

    Mat layersSize = Mat(3, 1, CV_16U);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);

    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);

    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    for (int i = 0; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();

        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network to output a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;


// Blend uniform noise in [0, 1] into every element of a CV_32FC1 matrix.
// Each value becomes (value + noise*scale) / (1 + scale) and is then
// clamped to [0, 1]. Uses rand(), so output depends on the global RNG state.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(row, col) + noise*scale) / (1.0f + scale);

            if (blended < 0)
                blended = 0;
            else if (blended > 1)
                blended = 1;

            mat.at<float>(row, col) = blended;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    Mat output_training_data = Mat(1, 2, CV_32F).clone();

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    mlp->setBackpropMomentumScale(0.1);

    Mat layersSize = Mat(3, 1, CV_16U);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);

    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);

    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    for (int i = 0; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();

        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;


// Blend uniform noise in [0, 1] into every element of a CV_32FC1 matrix.
// Each value becomes (value + noise*scale) / (1 + scale) and is then
// clamped to [0, 1]. Uses rand(), so output depends on the global RNG state.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(row, col) + noise*scale) / (1.0f + scale);

            if (blended < 0)
                blended = 0;
            else if (blended > 1)
                blended = 1;

            mat.at<float>(row, col) = blended;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    Mat output_training_data = Mat(1, 2, CV_32F).clone();

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    mlp->setBackpropMomentumScale(0.1);

    Mat layersSize = Mat(3, 1, CV_16U);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);

    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);

    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    for (int i = 0; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();

        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Print a CV_32FC1 matrix row by row as "[a, b, ...]" using fixed-point
// formatting with the requested precision.
void print(Mat& mat, int prec)
{
    const int num_rows = mat.size().height;
    const int num_cols = mat.size().width;

    for (int r = 0; r < num_rows; r++)
    {
        cout << "[";

        for (int c = 0; c < num_cols; c++)
        {
            cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(r, c);

            if (c == num_cols - 1)
                cout << "]" << endl;
            else
                cout << ", ";
        }
    }
}

// Blend uniform noise in [0, 1] into every element of a CV_32FC1 matrix.
// Each value becomes (value + noise*scale) / (1 + scale) and is then
// clamped to [0, 1]. Uses rand(), so output depends on the global RNG state.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(row, col) + noise*scale) / (1.0f + scale);

            if (blended < 0)
                blended = 0;
            else if (blended > 1)
                blended = 1;

            mat.at<float>(row, col) = blended;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    Mat output_training_data = Mat(1, 2, CV_32F).clone();

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    mlp->setBackpropMomentumScale(0.1);

    Mat layersSize = Mat(3, 1, CV_16U);
CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
     TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
     mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 0; 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
         add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Print a CV_32FC1 matrix row by row as "[a, b, ...]" using fixed-point
// formatting with the requested precision.
void print(Mat& mat, int prec)
{
    const int num_rows = mat.size().height;
    const int num_cols = mat.size().width;

    for (int r = 0; r < num_rows; r++)
    {
        cout << "[";

        for (int c = 0; c < num_cols; c++)
        {
            cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(r, c);

            if (c == num_cols - 1)
                cout << "]" << endl;
            else
                cout << ", ";
        }
    }
}

// Blend uniform noise in [0, 1] into every element of a CV_32FC1 matrix.
// Each value becomes (value + noise*scale) / (1 + scale) and is then
// clamped to [0, 1]. Uses rand(), so output depends on the global RNG state.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(row, col) + noise*scale) / (1.0f + scale);

            if (blended < 0)
                blended = 0;
            else if (blended > 1)
                blended = 1;

            mat.at<float>(row, col) = blended;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP);

    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Otherwise, the training is successful. Thanks for any help you can provide.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Round to the nearest whole number; exact halves round toward +infinity.
float round_float(const float input)
{
    const float shifted = input + 0.5f;
    return floor(shifted);
}

// Pretty-print a CV_32F matrix: one bracketed row per line, each entry
// written in fixed-point notation with 'prec' digits after the decimal.
void print(Mat& mat, int prec)
{
    const int row_count = mat.size().height;
    const int col_count = mat.size().width;

    for (int r = 0; r < row_count; r++)
    {
        std::cout << "[";
        for (int c = 0; c < col_count; c++)
        {
            std::cout << std::fixed << std::setw(2) << std::setprecision(prec) << mat.at<float>(r, c);
            if (c == col_count - 1)
                std::cout << "]" << std::endl;
            else
                std::cout << ", ";
        }
    }
}

// Blend uniform random noise (range [0, 1]) into every element of a float
// matrix: output = (pixel + noise*scale) / (1 + scale), clamped to [0, 1].
// 'scale' controls the noise strength.
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = (rand() % 256) / 255.0f;

            float &px = mat.at<float>(row, col);
            px = (px + noise*scale) / (1.0f + scale);

            // Guard against float round-off pushing the blend out of range.
            if (px > 1)
                px = 1;
            else if (px < 0)
                px = 0;
        }
    }
}

// Demo entry point: trains a 3-layer ANN_MLP so that four 64x64 grayscale
// images (flattened to 1x4096 float rows and perturbed with noise) map to
// four distinct 2-value target codes, then prints rounded predictions for
// ten rounds of noisy test inputs.
//
// Fix: the parameter-setup section contained leftover duplicates from a
// revision merge -- setTrainMethod() was called twice in a row (the first
// call, without a learning rate, was immediately overridden) and the
// momentum line was duplicated as a stray column-0 comment.  The dead
// duplicates are removed; behavior is otherwise unchanged.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    // NOTE(review): the imread results are never checked; a missing file
    // yields an empty Mat and the reshape/at calls below will fail.
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, normalizing each pixel to [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    mlp->setBackpropMomentumScale(0.1);

    // Neural network elements: one input neuron per pixel; hidden layer
    // sized by the sqrt(inputs * outputs) rule of thumb.
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    // NOTE(review): layer sizes are normally supplied as a CV_32S vector;
    // CV_16UC1 appears to be accepted here, but confirm against the docs.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters.  COUNT=1 makes every train() call run a single
    // backprop iteration, so the loop below serves as the epoch counter.
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    // 1x2 matrix holding the desired output pair for the current sample.
    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once.  This first full train() also establishes the
    // network's output scaling from these (all-zero) targets; presumably that
    // is why later UPDATE_WEIGHTS passes must keep their targets close to
    // this range (0.9 works, 1.0 is rejected) -- TODO confirm against
    // OpenCV's ANN_MLP output-scaling behavior.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        // Progress indicator every 100 epochs.
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -- target code (0, 0.9)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -- target code (0.9, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -- target code (0.9, 0.9)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    // Test the trained network on fresh noisy inputs.
    for (int i = 0; i < 10; i++)
    {
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Print the rounded two-value prediction for each image.
        Mat result;
        mlp->predict(flt_dove_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training is successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Nearest-integer rounding via floor(x + 0.5); halves go toward +infinity.
float round_float(const float input)
{
    float rounded = 0.5f + input;
    rounded = floor(rounded);
    return rounded;
}

// Write each matrix row to cout as "[a, b, ...]", with every element in
// fixed-point notation at 'prec' digits of precision.
void print(Mat& mat, int prec)
{
    for (int row = 0; row < mat.size().height; row++)
    {
        const int last_col = mat.size().width - 1;

        std::cout << "[";
        for (int col = 0; col <= last_col; col++)
        {
            std::cout << std::fixed << std::setw(2) << std::setprecision(prec) << mat.at<float>(row, col);
            if (col != last_col)
                std::cout << ", ";
            else
                std::cout << "]" << std::endl;
        }
    }
}

// Mix each element of a float matrix with uniform random noise in [0, 1].
// The division by (1 + scale) keeps the blend normalized, and a final
// clamp guards against round-off pushing values outside [0, 1].
void add_noise(Mat &mat, float scale)
{
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            float noise = static_cast<float>(rand() % 256) / 255.0f;

            float blended = (mat.at<float>(y, x) + noise * scale) / (1.0f + scale);

            if (blended < 0.0f)
                blended = 0.0f;
            if (blended > 1.0f)
                blended = 1.0f;

            mat.at<float>(y, x) = blended;
        }
    }
}

// Demo entry point: trains a 3-layer ANN_MLP so that four 64x64 grayscale
// images (flattened to 1x4096 float rows and perturbed with noise) map to
// four distinct 2-value target codes, then prints rounded predictions for
// ten rounds of noisy test inputs.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    // NOTE(review): the imread results are never checked; a missing file
    // yields an empty Mat and the reshape/at calls below will fail.
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, normalizing each pixel to [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements: one input neuron per pixel; hidden layer
    // sized by the sqrt(inputs * outputs) rule of thumb.
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    // NOTE(review): layer sizes are normally supplied as a CV_32S vector;
    // CV_16UC1 appears to be accepted here, but confirm against the docs.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters.  COUNT=1 makes every train() call run a single
    // backprop iteration, so the loop below serves as the epoch counter.
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    // 1x2 matrix holding the desired output pair for the current sample.
    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once.  This first full train() also establishes the
    // network's output scaling from these (all-zero) targets; presumably that
    // is why later UPDATE_WEIGHTS passes must keep their targets close to
    // this range (0.9 works, 1.0 is rejected) -- TODO confirm against
    // OpenCV's ANN_MLP output-scaling behavior.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        // Progress indicator every 100 epochs.
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -- target code (0, 0.9)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -- target code (0.9, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -- target code (0.9, 0.9)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    // Test the network again and again
    for (int i = 0; i < 10; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Print the rounded two-value prediction for each image.
        Mat result;
        mlp->predict(flt_dove_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

float round_float(const float input)
{
    // floor(x + 0.5) rounds to the nearest whole number (ties go upward).
    return floor(0.5f + input);
}

// Render the matrix on cout as bracketed, comma-separated rows, using
// fixed-point output with 'prec' fractional digits.
void print(Mat& mat, int prec)
{
    const int height = mat.size().height;
    const int width = mat.size().width;

    for (int y = 0; y < height; y++)
    {
        std::cout << "[";
        for (int x = 0; x < width; x++)
        {
            const bool row_end = (x == width - 1);

            std::cout << std::fixed << std::setw(2) << std::setprecision(prec) << mat.at<float>(y, x);
            if (row_end)
                std::cout << "]" << std::endl;
            else
                std::cout << ", ";
        }
    }
}

// Inject uniform random noise (drawn from [0, 1]) into a normalized float
// matrix.  Each element becomes (value + noise*scale) / (1 + scale), then
// is clamped so the matrix stays within [0, 1].
void add_noise(Mat &mat, float scale)
{
    const float norm = 1.0f + scale;

    for (int r = 0; r < mat.rows; r++)
    {
        for (int c = 0; c < mat.cols; c++)
        {
            const float noise = (rand() % 256) / 255.0f;

            float &cell = mat.at<float>(r, c);
            cell = (cell + noise*scale) / norm;

            if (cell < 0)
                cell = 0;
            else if (cell > 1)
                cell = 1;
        }
    }
}

// Demo entry point: trains a 3-layer ANN_MLP so that four 64x64 grayscale
// images (flattened to 1x4096 float rows and perturbed with noise) map to
// four distinct 2-value target codes, then prints rounded predictions for
// ten rounds of noisy test inputs.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    // NOTE(review): the imread results are never checked; a missing file
    // yields an empty Mat and the reshape/at calls below will fail.
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, normalizing each pixel to [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements: one input neuron per pixel; hidden layer
    // sized by the sqrt(inputs * outputs) rule of thumb.
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    // NOTE(review): layer sizes are normally supplied as a CV_32S vector;
    // CV_16UC1 appears to be accepted here, but confirm against the docs.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters.  COUNT=1 makes every train() call run a single
    // backprop iteration, so the loop below serves as the epoch counter.
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    // 1x2 matrix holding the desired output pair for the current sample.
    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once.  This first full train() also establishes the
    // network's output scaling from these (all-zero) targets; presumably that
    // is why later UPDATE_WEIGHTS passes must keep their targets close to
    // this range (0.9 works, 1.0 is rejected) -- TODO confirm against
    // OpenCV's ANN_MLP output-scaling behavior.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        // Progress indicator every 100 epochs.
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -- target code (0, 0.9)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -- target code (0.9, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -- target code (0.9, 0.9)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    // Test the network again and again
    for (int i = 0; i < 10; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Print the rounded two-value prediction for each image.
        Mat result;
        mlp->predict(flt_dove_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output value error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

float round_float(const float input)
{
    // Shift by one half, then truncate downward -> nearest-integer rounding
    // (exact halves round toward +infinity).
    const float bumped = input + 0.5f;
    const float rounded = floor(bumped);
    return rounded;
}

// Print every matrix row as "[v0, v1, ...]" using fixed-point notation
// with 'prec' digits after the decimal point.
void print(Mat& mat, int prec)
{
    for (int row = 0; row < mat.size().height; row++)
    {
        const int cols = mat.size().width;

        std::cout << "[";
        for (int col = 0; col < cols; col++)
        {
            std::cout << std::fixed << std::setw(2) << std::setprecision(prec) << mat.at<float>(row, col);
            if (col + 1 < cols)
                std::cout << ", ";
            else
                std::cout << "]" << std::endl;
        }
    }
}

// Perturb every element of a normalized float matrix with uniform random
// noise in [0, 1], weighted by 'scale', then renormalize and clamp so the
// result remains inside [0, 1].
void add_noise(Mat &mat, float scale)
{
    for (int j = 0; j < mat.rows; j++)
    {
        for (int i = 0; i < mat.cols; i++)
        {
            const float sample = (rand() % 256) / 255.0f;

            float mixed = mat.at<float>(j, i);
            mixed = (mixed + sample * scale) / (1.0f + scale);

            if (mixed > 1.0f)
                mixed = 1.0f;
            else if (mixed < 0.0f)
                mixed = 0.0f;

            mat.at<float>(j, i) = mixed;
        }
    }
}

// Demo entry point: trains a 3-layer ANN_MLP so that four 64x64 grayscale
// images (flattened to 1x4096 float rows and perturbed with noise) map to
// four distinct 2-value target codes, then prints rounded predictions for
// ten rounds of noisy test inputs.
//
// Fix for the reported "Some of new output training vector components run
// exceed the original range too much" error: the first train() call used to
// compute the network's output scaling from a single all-zero target row,
// so any later UPDATE_WEIGHTS pass whose targets stray from that degenerate
// range (e.g. 1.0 instead of 0.9) was rejected.  Passing NO_OUTPUT_SCALE on
// every train() call disables output rescaling entirely, so targets
// anywhere inside the activation function's range (including 1.0) work.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    // NOTE(review): the imread results are never checked; a missing file
    // yields an empty Mat and the reshape/at calls below will fail.
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, normalizing each pixel to [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements: one input neuron per pixel; hidden layer
    // sized by the sqrt(inputs * outputs) rule of thumb.
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    // NOTE(review): layer sizes are normally supplied as a CV_32S vector;
    // CV_16UC1 appears to be accepted here, but confirm against the docs.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters.  COUNT=1 makes every train() call run a single
    // backprop iteration, so the loop below serves as the epoch counter.
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    // 1x2 matrix holding the desired output pair for the current sample
    // (the redundant clone() of a temporary was removed).
    Mat output_training_data(1, 2, CV_32FC1);

    // Train the network once.  NO_OUTPUT_SCALE keeps OpenCV from deriving an
    // output scale from this single all-zero target row -- the cause of the
    // out-of-range error when later targets were set to 1.0.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData, ANN_MLP::TrainFlags::NO_OUTPUT_SCALE);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        // Progress indicator every 100 epochs.
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS | ANN_MLP::TrainFlags::NO_OUTPUT_SCALE);

        // Train for image 2 -- target code (0, 0.9)
        output_training_data.at<float>(0, 0) = 0;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS | ANN_MLP::TrainFlags::NO_OUTPUT_SCALE);

        // Train for image 3 -- target code (0.9, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS | ANN_MLP::TrainFlags::NO_OUTPUT_SCALE);

        // Train for image 4 -- target code (0.9, 0.9)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS | ANN_MLP::TrainFlags::NO_OUTPUT_SCALE);
    }

    // Test the network again and again
    for (int i = 0; i < 10; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Print the rounded two-value prediction for each image.
        Mat result;
        mlp->predict(flt_dove_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

float round_float(const float input)
{
    // Round to the nearest integer; exact halves round toward +infinity.
    float result = input;
    result += 0.5f;
    return floor(result);
}

// Emit the matrix on cout, one "[a, b, ...]" line per row; each value is
// formatted fixed-point with 'prec' fractional digits.
void print(Mat& mat, int prec)
{
    const int n_rows = mat.size().height;

    for (int r = 0; r < n_rows; r++)
    {
        const int n_cols = mat.size().width;

        std::cout << "[";
        for (int c = 0; c < n_cols; c++)
        {
            std::cout << std::fixed << std::setw(2) << std::setprecision(prec) << mat.at<float>(r, c);

            const bool more_to_come = (c != n_cols - 1);
            if (more_to_come)
                std::cout << ", ";
            else
                std::cout << "]" << std::endl;
        }
    }
}

// Add scaled uniform random noise (in [0, 1]) to each element of a float
// matrix, dividing by (1 + scale) to keep values normalized and clamping
// the final result into [0, 1].
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float jitter = static_cast<float>(rand() % 256) / 255.0f;

            float &value = mat.at<float>(row, col);
            value = (value + jitter * scale) / (1.0f + scale);

            // Clamp to the valid normalized range.
            if (value < 0.0f)
                value = 0.0f;
            else if (value > 1.0f)
                value = 1.0f;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0;
0.1f;
        output_training_data.at<float>(0, 1) = 0;
0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0;
0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0;
0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    // Test the network again and again    
    for (int i = 0; i < 10; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

float round_float(const float input)
{
    return floor(input + 0.5f);
}

void print(Mat& mat, int prec)
{
    for (int i = 0; i<mat.size().height; i++)
    {
        cout << "[";
        for (int j = 0; j<mat.size().width; j++)
        {
            cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(i, j);
            if (j != mat.size().width - 1)
                cout << ", ";
            else
                cout << "]" << endl;
        }
    }
}

void add_noise(Mat &mat, float scale)
{
    for (int j = 0; j < mat.rows; j++)
    {
        for (int i = 0; i < mat.cols; i++)
        {
            float noise = static_cast<float>(rand() % 256);
            noise /= 255.0f;

            mat.at<float>(j, i) = (mat.at<float>(j, i) + noise*scale) / (1.0f + scale);

            if (mat.at<float>(j, i) < 0)
                mat.at<float>(j, i) = 0;
            else if (mat.at<float>(j, i) > 1)
                mat.at<float>(j, i) = 1;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    // Test the network again and again    
    for (int i = 0; i < 10; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

// NOTE: `using namespace cv;` appeared twice in the original; deduplicated.
using namespace cv;
using namespace ml;
using namespace std;

// Half-up rounding to the nearest whole number, kept as a float.
float round_float(const float input)
{
    return floorf(input + 0.5f);
}

// Print a CV_32FC1 matrix row by row in bracketed, fixed-point form.
void print(Mat& mat, int prec)
{
    const Size dims = mat.size();

    for (int r = 0; r < dims.height; r++)
    {
        cout << "[";

        for (int c = 0; c < dims.width; c++)
        {
            cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(r, c);

            if (c == dims.width - 1)
                cout << "]" << endl;
            else
                cout << ", ";
        }
    }
}

// Mix uniform random noise into a CV_32FC1 matrix in place and clamp
// every element back into the normalized [0, 1] pixel range.
void add_noise(Mat &mat, float scale)
{
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            const float n = static_cast<float>(rand() % 256) / 255.0f;

            float &px = mat.at<float>(y, x);
            px = (px + n * scale) / (1.0f + scale);

            if (px > 1)
                px = 1;
            else if (px < 0)
                px = 0;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols;
    const int num_output_neurons = 2;
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, 2, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again    
    for (int i = 0; i < 10; num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);
 
        if (round_float(result.at<float>(0, 0)) < 0.5 && round_float(result.at<float>(0, 1) < 0.5))
            num_successes++;
        else
            num_failures++; 

        mlp->predict(flt_flowers_noise, result);

        if (round_float(result.at<float>(0, 0)) < 0.5 && round_float(result.at<float>(0, 1) > 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        if (round_float(result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1) < 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        if (round_float(result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1) > 0.5))
            num_successes++;
        else
            num_failures++;
    }

    cout << round_float(result.at<float>(0, 0)) << " "Success rate: " << round_float(result.at<float>(0, 1)) 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

        mlp->predict(flt_flowers_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_peacock_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        mlp->predict(flt_statue_noise, result);
        cout << round_float(result.at<float>(0, 0)) << " " << round_float(result.at<float>(0, 1)) << endl;

        cout << endl;
    }

    return 0;
}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

// Reconstructed from fused diff-view residue: includes, the MSVC link
// pragma, and the using-directives of the original program.
#include <opencv2/opencv.hpp>
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Reconstructed from a single collapsed diff line that fused two
// definitions. round_float: half-up rounding kept as a float.
float round_float(const float input)
{
    return floor(input + 0.5f);
}

// print: pretty-print a CV_32FC1 matrix, one bracketed fixed-point
// row per line at the requested precision.
void print(Mat& mat, int prec)
{
    for (int i = 0; i < mat.size().height; i++)
    {
        cout << "[";
        for (int j = 0; j < mat.size().width; j++)
        {
            cout << fixed << setw(2) << setprecision(prec) << mat.at<float>(i, j);
            if (j != mat.size().width - 1)
                cout << ", ";
            else
                cout << "]" << endl;
        }
    }
}

// Reconstructed from collapsed diff residue (stray duplicate braces
// removed). Blends uniform noise into a CV_32FC1 matrix in place and
// clamps each element back into [0, 1].
void add_noise(Mat &mat, float scale)
{
    for (int j = 0; j < mat.rows; j++)
    {
        for (int i = 0; i < mat.cols; i++)
        {
            float noise = static_cast<float>(rand() % 256);
            noise /= 255.0f;

            mat.at<float>(j, i) = (mat.at<float>(j, i) + noise*scale) / (1.0f + scale);

            if (mat.at<float>(j, i) < 0)
                mat.at<float>(j, i) = 0;
            else if (mat.at<float>(j, i) > 1)
                mat.at<float>(j, i) = 1;
        }
    }
}

// Reconstructed from collapsed diff residue: the final revision's main,
// which trains the ANN_MLP and reports an overall success rate.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, scaling pixels into [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
    const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters. SIGMOID_SYM rescales targets internally,
    // which is why 0.1/0.9 (not 0/1) are used as training targets below.
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy versions of the images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -> target (0.1, 0.1)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -> target (0.1, 0.9)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -> target (0.9, 0.1)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -> target (0.9, 0.9)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again
    for (int i = 0; i < num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);

        // BUGFIX: in the source revision the closing parenthesis of
        // round_float() was misplaced in each second comparison; both
        // outputs are now rounded before comparing against 0.5.
        if (round_float(result.at<float>(0, 0)) < 0.5 && round_float(result.at<float>(0, 1)) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_flowers_noise, result);

        if (round_float(result.at<float>(0, 0)) < 0.5 && round_float(result.at<float>(0, 1)) > 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        if (round_float(result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1)) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        if (round_float(result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1)) > 0.5)
            num_successes++;
        else
            num_failures++;
    }

    cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

    return 0;
}

}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

include <opencv2 opencv.hpp="">

#include <opencv2/opencv.hpp>
using namespace cv;

pragma cv; #pragma comment(lib, "opencv_world331.lib")

include <iostream>

include <iomanip>

"opencv_world331.lib") #include <iostream> #include <iomanip> using namespace cv; using namespace ml; using namespace std;

std; float round_float(const float input) { return floor(input + 0.5f); }

} void add_noise(Mat &mat, float scale) { for (int j = 0; j < mat.rows; j++) { for (int i = 0; i < mat.cols; i++) { float noise = static_cast<float>(rand() % 256); noise /= 255.0f;

255.0f;
 mat.at<float>(j, i) = (mat.at<float>(j, i) + noise*scale) / (1.0f + scale);
 if (mat.at<float>(j, i) < 0)
 mat.at<float>(j, i) = 0;
  else if (mat.at<float>(j, i) > 1)
 mat.at<float>(j, i) = 1;
 }
}
}

}

int main(void) { const int image_width = 64; const int image_height = 64;

64;
// Read in 64 row x 64 column images
 Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
 Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
 Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
 Mat statue = imread("statue.png", IMREAD_GRAYSCALE);
 // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
 dove = dove.reshape(0, 1);
 flowers = flowers.reshape(0, 1);
 peacock = peacock.reshape(0, 1);
 statue = statue.reshape(0, 1);
 // Convert CV_8UC1 to CV_32FC1
 Mat flt_dove(dove.rows, dove.cols, CV_32FC1);
 for (int j = 0; j < dove.rows; j++)
 for (int i = 0; i < dove.cols; i++)
 flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;
 Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);
 for (int j = 0; j < flowers.rows; j++)
 for (int i = 0; i < flowers.cols; i++)
 flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;
 Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);
 for (int j = 0; j < peacock.rows; j++)
 for (int i = 0; i < peacock.cols; i++)
 flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;
 Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);
 for (int j = 0; j < statue.rows; j++)
 for (int i = 0; i < statue.cols; i++)
 flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;
 Ptr<ANN_MLP> mlp = ANN_MLP::create();
 // Slow the learning process
 //mlp->setBackpropMomentumScale(0.1);
 // Neural network elements
 const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
 const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
 const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
 Mat layersSize = Mat(3, 1, CV_16UC1);
 layersSize.row(0) = Scalar(num_input_neurons);
 layersSize.row(1) = Scalar(num_hidden_neurons);
 layersSize.row(2) = Scalar(num_output_neurons);
 mlp->setLayerSizes(layersSize);
 // Set various parameters
 mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
 TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
 mlp->setTermCriteria(termCrit);
 mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);
 Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();
 // Train the network once
 output_training_data.at<float>(0, 0) = 0;
 output_training_data.at<float>(0, 1) = 0;
 Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
 mlp->train(trainingData);
 // Train the network again and again
 for (int i = 1; i < 1000; i++)
 {
  if (i % 100 == 0)
 cout << i << endl;
  // Make noisy version of images to be used as network input
 Mat flt_dove_noise = flt_dove.clone();
 Mat flt_flowers_noise = flt_flowers.clone();
 Mat flt_peacock_noise = flt_peacock.clone();
 Mat flt_statue_noise = flt_statue.clone();
 add_noise(flt_dove_noise, 0.1f);
 add_noise(flt_flowers_noise, 0.1f);
 add_noise(flt_peacock_noise, 0.1f);
 add_noise(flt_statue_noise, 0.1f);
  // Train for image 1
 output_training_data.at<float>(0, 0) = 0.1f;
 output_training_data.at<float>(0, 1) = 0.1f;
  trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
 mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
  // Train for image 2
 output_training_data.at<float>(0, 0) = 0.1f;
 output_training_data.at<float>(0, 1) = 0.9f;
  trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
 mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
  // Train for image 3
 output_training_data.at<float>(0, 0) = 0.9f;
 output_training_data.at<float>(0, 1) = 0.1f;
  trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
 mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
  // Train for image 4
 output_training_data.at<float>(0, 0) = 0.9f;
 output_training_data.at<float>(0, 1) = 0.9f;
  trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
 mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
 }
 int num_tests = 100;
 int num_successes = 0;
 int num_failures = 0;
 // Test the network again and again
 for (int i = 0; i < num_tests; i++)
 {
  // Use noisy input images
 Mat flt_dove_noise = flt_dove.clone();
 Mat flt_flowers_noise = flt_flowers.clone();
 Mat flt_peacock_noise = flt_peacock.clone();
 Mat flt_statue_noise = flt_statue.clone();
 add_noise(flt_dove_noise, 0.1f);
 add_noise(flt_flowers_noise, 0.1f);
 add_noise(flt_peacock_noise, 0.1f);
 add_noise(flt_statue_noise, 0.1f);
 Mat result;
 mlp->predict(flt_dove_noise, result);
  if (round_float(result.at<float>(0, 0)) < 0.5 && round_float(result.at<float>(0, 1) < 0.5))
 num_successes++;
 else
 num_failures++;
  mlp->predict(flt_flowers_noise, result);
  if (round_float(result.at<float>(0, 0)) < 0.5 && round_float(result.at<float>(0, 1) > 0.5))
 num_successes++;
 else
 num_failures++;
  mlp->predict(flt_peacock_noise, result);
  if (round_float(result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1) < 0.5))
 num_successes++;
 else
 num_failures++;
  mlp->predict(flt_statue_noise, result);
  if (round_float(result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1) > 0.5))
 num_successes++;
 else
 num_failures++;
 }
 cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;
 return 0;
}

}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Round to the nearest whole number; exact halves round toward +infinity.
float round_float(const float input)
{
    const float shifted = input + 0.5f;
    return std::floor(shifted);
}

// Blend uniform noise into a CV_32FC1 matrix in place.
// Each element becomes a weighted average of its current value and a
// random sample drawn from [0, 1]; the result is clamped to [0, 1].
// `scale` controls the noise weight (0 = untouched).
void add_noise(Mat &mat, float scale)
{
    for (int row = 0; row < mat.rows; row++)
    {
        for (int col = 0; col < mat.cols; col++)
        {
            const float noise = static_cast<float>(rand() % 256) / 255.0f;

            float &px = mat.at<float>(row, col);
            px = (px + noise*scale) / (1.0f + scale);

            // Guard against rounding drift outside the unit interval.
            if (px < 0)
                px = 0;
            else if (px > 1)
                px = 1;
        }
    }
}

// Train a 3-layer MLP to classify four 64x64 grayscale images into 2-bit
// codes, then measure classification accuracy on noisy copies.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column grayscale images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, scaling pixel values into [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
    const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    // NOTE(review): ANN_MLP::setLayerSizes documents an integer vector
    // (CV_32S) of layer sizes -- confirm that CV_16UC1 is accepted here.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();

    // Train the network once. NOTE(review): the output values seen by this
    // first train() call establish the network's internal output scaling;
    // later UPDATE_WEIGHTS passes whose targets stray too far outside that
    // range appear to trigger the "new output training vector components run
    // exceed the original range too much" error (e.g. a 1.0 target) --
    // confirm against the OpenCV ANN_MLP implementation.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy versions of the images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -- target code (0, 1)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -- target code (1, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -- target code (1, 1)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again
    for (int i = 0; i < num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);

        // Dove: expect code (0, 0)
        if (result.at<float>(0, 0) < 0.5 && result.at<float>(0, 1) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_flowers_noise, result);

        // Flowers: expect code (0, 1)
        if (result.at<float>(0, 0) < 0.5 && result.at<float>(0, 1) > 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        // Peacock: expect code (1, 0)
        if (result.at<float>(0, 0) > 0.5 && result.at<float>(0, 1) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        // Statue: expect code (1, 1)
        if (result.at<float>(0, 0) > 0.5 && result.at<float>(0, 1) > 0.5)
            num_successes++;
        else
            num_failures++;
    }

    cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

    return 0;
}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Nearest-integer rounding; 0.5 fractions round upward.
float round_float(const float input)
{
    return std::floor(0.5f + input);
}

// Mix random noise into a CV_32FC1 matrix, element by element.
// The updated value is (value + noise*scale) / (1 + scale), where noise is
// uniform over [0, 1]; results are clamped back into [0, 1].
void add_noise(Mat &mat, float scale)
{
    for (int y = 0; y < mat.rows; y++)
    {
        for (int x = 0; x < mat.cols; x++)
        {
            const float sample = static_cast<float>(rand() % 256) / 255.0f;

            float &value = mat.at<float>(y, x);
            value = (value + sample*scale) / (1.0f + scale);

            // Clamp to the unit interval.
            if (value < 0)
                value = 0;
            else if (value > 1)
                value = 1;
        }
    }
}

// Train a 3-layer MLP to classify four 64x64 grayscale images into 2-bit
// codes, then measure classification accuracy on noisy copies.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column grayscale images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, scaling pixel values into [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
    const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    // NOTE(review): ANN_MLP::setLayerSizes documents an integer vector
    // (CV_32S) of layer sizes -- confirm that CV_16UC1 is accepted here.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();

    // Train the network once. NOTE(review): the output values seen by this
    // first train() call establish the network's internal output scaling;
    // later UPDATE_WEIGHTS passes whose targets stray too far outside that
    // range appear to trigger the "new output training vector components run
    // exceed the original range too much" error (e.g. a 1.0 target) --
    // confirm against the OpenCV ANN_MLP implementation.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy versions of the images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -- target code (0, 1)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -- target code (1, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -- target code (1, 1)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again
    for (int i = 0; i < num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);

        // Dove: expect code (0, 0)
        if (result.at<float>(0, 0) < 0.5 && result.at<float>(0, 1) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_flowers_noise, result);

        // Flowers: expect code (0, 1)
        if (result.at<float>(0, 0) < 0.5 && result.at<float>(0, 1) > 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        // Peacock: expect code (1, 0)
        if (result.at<float>(0, 0) > 0.5 && result.at<float>(0, 1) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        // Statue: expect code (1, 1). The stray round_float() around the
        // second comparison in the original was a no-op on a boolean and has
        // been removed for consistency with the three checks above.
        if (result.at<float>(0, 0) > 0.5 && result.at<float>(0, 1) > 0.5)
            num_successes++;
        else
            num_failures++;
    }

    cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

    return 0;
}

This is odd because when doing the XOR operation, it can train for 1: https://github.com/sjhalayka/opencv_xor/blob/master/main.cpp

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign the value of 0.9 and it works. When I switch those values to 1, it fails. Thanks for any help you can provide. Otherwise, the training and testing are successful.

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Round a float to the nearest integer value (half-up rounding).
float round_float(const float input)
{
    const float biased = 0.5f + input;
    return std::floor(biased);
}

// In-place noise injection for a CV_32FC1 matrix: every element is replaced
// by the weighted mean of itself and a uniform [0, 1] random sample, with
// the noise weighted by `scale`, then clamped into [0, 1].
void add_noise(Mat &mat, float scale)
{
    const int rows = mat.rows;
    const int cols = mat.cols;

    for (int r = 0; r < rows; r++)
    {
        for (int c = 0; c < cols; c++)
        {
            const float random_sample = static_cast<float>(rand() % 256) / 255.0f;

            float &cell = mat.at<float>(r, c);
            cell = (cell + random_sample*scale) / (1.0f + scale);

            // Keep the value inside the unit interval.
            if (cell < 0)
                cell = 0;
            else if (cell > 1)
                cell = 1;
        }
    }
}

// Train a 3-layer MLP to classify four 64x64 grayscale images into 2-bit
// codes, then measure classification accuracy on noisy copies.
int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column grayscale images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1, scaling pixel values into [0, 1]
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
    const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));

    // NOTE(review): ANN_MLP::setLayerSizes documents an integer vector
    // (CV_32S) of layer sizes -- confirm that CV_16UC1 is accepted here.
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();

    // Train the network once. NOTE(review): the output values seen by this
    // first train() call establish the network's internal output scaling;
    // later UPDATE_WEIGHTS passes whose targets stray too far outside that
    // range appear to trigger the "new output training vector components run
    // exceed the original range too much" error (e.g. a 1.0 target) --
    // confirm against the OpenCV ANN_MLP implementation.
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy versions of the images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1 -- target code (0, 0)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2 -- target code (0, 1)
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3 -- target code (1, 0)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4 -- target code (1, 1)
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again
    for (int i = 0; i < num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);

        // Dove: expect code (0, 0)
        if (result.at<float>(0, 0) < 0.5 && result.at<float>(0, 1) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_flowers_noise, result);

        // Flowers: expect code (0, 1)
        if (result.at<float>(0, 0) < 0.5 && result.at<float>(0, 1) > 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        // Peacock: expect code (1, 0)
        if (result.at<float>(0, 0) > 0.5 && result.at<float>(0, 1) < 0.5)
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        // Statue: expect code (1, 1). The stray round_float() around the
        // second comparison in the original was a no-op on a boolean and has
        // been removed for consistency with the three checks above.
        if (result.at<float>(0, 0) > 0.5 && result.at<float>(0, 1) > 0.5)
            num_successes++;
        else
            num_failures++;
    }

    cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

    return 0;
}

This is odd because when doing the XOR operation, it can train for 1: https://github.com/sjhalayka/opencv_xor/blob/master/main.cpp

Anyway, here's the code:

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Half-up rounding of a float to the nearest whole number.
float round_float(const float input)
{
    return std::floor(input + 0.5f);
}

// Corrupt a CV_32FC1 matrix in place with uniform noise: each element is
// averaged with a random [0, 1] sample, weighted by `scale`, and the result
// is clamped to [0, 1].
void add_noise(Mat &mat, float scale)
{
    for (int row_idx = 0; row_idx < mat.rows; row_idx++)
    {
        for (int col_idx = 0; col_idx < mat.cols; col_idx++)
        {
            const float rnd = static_cast<float>(rand() % 256) / 255.0f;

            float &elem = mat.at<float>(row_idx, col_idx);
            elem = (elem + rnd*scale) / (1.0f + scale);

            // Re-clamp to [0, 1] after mixing.
            if (elem < 0)
                elem = 0;
            else if (elem > 1)
                elem = 1;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
    const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again    
    for (int i = 0; i < num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);

        if ((result.at<float>(0, 0)) < 0.5 && (result.at<float>(0, 1) < 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_flowers_noise, result);

        if ((result.at<float>(0, 0)) < 0.5 && (result.at<float>(0, 1) > 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        if ((result.at<float>(0, 0)) > 0.5 && (result.at<float>(0, 1) < 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        if ((result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, 1) > 0.5))
            num_successes++;
        else
            num_failures++;
    }

    cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

    return 0;
}

ANN_MLP output training value of 1 causes error

When I try to train the network's output neurons to produce a value of 1, it gives me an error saying that "OpenCV Error: One of arguments' values is out of range (Some of new output training vector components run exceed the original range too much)".

The input images are:

dove.png image description

flowers.png image description

peacock.png image description

statue.png image description

My full code is listed below. Near the end of the code I assign output training values of 0.9, and it works; when I switch those values to 1, it fails. Otherwise, the training and testing are successful. Thanks for any help you can provide.

This is odd because when doing the XOR operation, it can train for 1: https://github.com/sjhalayka/opencv_xor/blob/master/main.cpp

Anyway, here's the code:

#include <opencv2/opencv.hpp>
using namespace cv;
#pragma comment(lib, "opencv_world331.lib")

#include <iostream>
#include <iomanip>

using namespace cv;
using namespace ml;
using namespace std;

// Round half up: shift by 0.5, then truncate toward negative infinity.
float round_float(const float input)
{
    return std::floor(input + 0.5f);
}

// Mix uniform noise from [0, 1] into every CV_32FC1 element of 'mat', with
// the noise contribution weighted by 'scale'; results are clamped to [0, 1].
void add_noise(Mat &mat, float scale)
{
    const float denom = 1.0f + scale;

    for (int r = 0; r < mat.rows; r++)
    {
        for (int c = 0; c < mat.cols; c++)
        {
            // One rand() draw per element, mapped into [0, 1].
            const float n = static_cast<float>(rand() % 256) / 255.0f;

            float &value = mat.at<float>(r, c);
            value = (value + n*scale) / denom;

            // Keep the blended value inside the valid range.
            if (value < 0.0f)
                value = 0.0f;
            if (value > 1.0f)
                value = 1.0f;
        }
    }
}

int main(void)
{
    const int image_width = 64;
    const int image_height = 64;

    // Read in 64 row x 64 column images
    Mat dove = imread("dove.png", IMREAD_GRAYSCALE);
    Mat flowers = imread("flowers.png", IMREAD_GRAYSCALE);
    Mat peacock = imread("peacock.png", IMREAD_GRAYSCALE);
    Mat statue = imread("statue.png", IMREAD_GRAYSCALE);

    // Reshape from 64 rows x 64 columns image to 1 row x (64*64) columns
    dove = dove.reshape(0, 1);
    flowers = flowers.reshape(0, 1);
    peacock = peacock.reshape(0, 1);
    statue = statue.reshape(0, 1);

    // Convert CV_8UC1 to CV_32FC1
    Mat flt_dove(dove.rows, dove.cols, CV_32FC1);

    for (int j = 0; j < dove.rows; j++)
        for (int i = 0; i < dove.cols; i++)
            flt_dove.at<float>(j, i) = dove.at<unsigned char>(j, i) / 255.0f;

    Mat flt_flowers(flowers.rows, flowers.cols, CV_32FC1);

    for (int j = 0; j < flowers.rows; j++)
        for (int i = 0; i < flowers.cols; i++)
            flt_flowers.at<float>(j, i) = flowers.at<unsigned char>(j, i) / 255.0f;

    Mat flt_peacock(peacock.rows, peacock.cols, CV_32FC1);

    for (int j = 0; j < peacock.rows; j++)
        for (int i = 0; i < peacock.cols; i++)
            flt_peacock.at<float>(j, i) = peacock.at<unsigned char>(j, i) / 255.0f;

    Mat flt_statue = Mat(statue.rows, statue.cols, CV_32FC1);

    for (int j = 0; j < statue.rows; j++)
        for (int i = 0; i < statue.cols; i++)
            flt_statue.at<float>(j, i) = statue.at<unsigned char>(j, i) / 255.0f;

    Ptr<ANN_MLP> mlp = ANN_MLP::create();

    // Slow the learning process
    //mlp->setBackpropMomentumScale(0.1);

    // Neural network elements
    const int num_input_neurons = dove.cols; // One input neuron per grayscale pixel
    const int num_output_neurons = 2; // 4 images to classify, so number of bits needed is ceiling(ln(n)/ln(2))
    const int num_hidden_neurons = static_cast<int>(sqrtf(image_width*image_height*num_output_neurons));
    Mat layersSize = Mat(3, 1, CV_16UC1);
    layersSize.row(0) = Scalar(num_input_neurons);
    layersSize.row(1) = Scalar(num_hidden_neurons);
    layersSize.row(2) = Scalar(num_output_neurons);
    mlp->setLayerSizes(layersSize);

    // Set various parameters
    mlp->setActivationFunction(ANN_MLP::ActivationFunctions::SIGMOID_SYM);
    TermCriteria termCrit = TermCriteria(TermCriteria::Type::COUNT + TermCriteria::Type::EPS, 1, 0.000001);
    mlp->setTermCriteria(termCrit);
    mlp->setTrainMethod(ANN_MLP::TrainingMethods::BACKPROP, 0.0001);

    Mat output_training_data = Mat(1, num_output_neurons, CV_32FC1).clone();

    // Train the network once
    output_training_data.at<float>(0, 0) = 0;
    output_training_data.at<float>(0, 1) = 0;
    Ptr<TrainData> trainingData = TrainData::create(flt_dove, SampleTypes::ROW_SAMPLE, output_training_data);
    mlp->train(trainingData);

    // Train the network again and again
    for (int i = 1; i < 1000; i++)
    {
        if (i % 100 == 0)
            cout << i << endl;

        // Make noisy version of images to be used as network input
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        // Train for image 1
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_dove_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 2
        output_training_data.at<float>(0, 0) = 0.1f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_flowers_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 3
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.1f;
        trainingData = TrainData::create(flt_peacock_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);

        // Train for image 4
        output_training_data.at<float>(0, 0) = 0.9f;
        output_training_data.at<float>(0, 1) = 0.9f;
        trainingData = TrainData::create(flt_statue_noise, SampleTypes::ROW_SAMPLE, output_training_data);
        mlp->train(trainingData, ANN_MLP::TrainFlags::UPDATE_WEIGHTS);
    }

    int num_tests = 100;
    int num_successes = 0;
    int num_failures = 0;

    // Test the network again and again    
    for (int i = 0; i < num_tests; i++)
    {
        // Use noisy input images
        Mat flt_dove_noise = flt_dove.clone();
        Mat flt_flowers_noise = flt_flowers.clone();
        Mat flt_peacock_noise = flt_peacock.clone();
        Mat flt_statue_noise = flt_statue.clone();
        add_noise(flt_dove_noise, 0.1f);
        add_noise(flt_flowers_noise, 0.1f);
        add_noise(flt_peacock_noise, 0.1f);
        add_noise(flt_statue_noise, 0.1f);

        Mat result;
        mlp->predict(flt_dove_noise, result);

        if ((result.at<float>(0, 0)) < 0.5 && (result.at<float>(0, 1) < 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_flowers_noise, result);

        if ((result.at<float>(0, 0)) < 0.5 && (result.at<float>(0, 1) > 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_peacock_noise, result);

        if ((result.at<float>(0, 0)) > 0.5 && (result.at<float>(0, 1) < 0.5))
            num_successes++;
        else
            num_failures++;

        mlp->predict(flt_statue_noise, result);

        if ((result.at<float>(0, 0)) > 0.5 && round_float(result.at<float>(0, (result.at<float>(0, 1) > 0.5))
            num_successes++;
        else
            num_failures++;
    }

    cout << "Success rate: " << 100.0*static_cast<double>(num_successes) / static_cast<double>(num_tests * 4) << "%" << endl;

    return 0;
}