detectMultiScale with a multi-class HOG + SVM classifier on a video stream

I want to build valve detection and open/closed classification, like in this video: https://www.youtube.com/watch?v=VY92fqmSdfA

I chose to use HOG (Histogram of Oriented Gradients) + SVM. This is how it works:

1 - Read positive images (valve open) and negative images (valve closed).

int const num_total_images = 100;
int width = 128, height = 128;

// Load the positive images (valve open)
vector<cv::String> fn;
glob("Add here positive images path *.jpg", fn, false);
vector<Mat> imagesO;
size_t count = fn.size(); // number of .jpg files in the positive folder
for (size_t i = 0; i < count; i++)
    imagesO.push_back(imread(fn[i], 0)); // read as grayscale

cout << "You have added " << imagesO.size() << " images of the open valve" << endl;

// Load the negative images (valve closed)
vector<cv::String> fn2;
glob(" ADD HERE NEGATIVE IMAGES PATH/*.jpg", fn2, false);
vector<Mat> imagesF;
size_t count2 = fn2.size(); // number of .jpg files in the negative folder
for (size_t i = 0; i < count2; i++)
    imagesF.push_back(imread(fn2[i], 0)); // read as grayscale

// One row per training image, one column per HOG descriptor element
// (18900 = descriptor length produced below for a 128x128 image)
Mat HOGFeat_train(num_total_images, 18900, CV_32FC1);
int numimgopenc = imagesO.size();
int numimgclose = imagesF.size();

// Rows 0 .. numimgopenc-1 hold the open-valve descriptors
for (int i = 0; i < numimgopenc; i++)
{
    resize(imagesO[i], imagesO[i], Size(width, height));
    // Canny(imagesO[i], imagesO[i], 150, 255, 3); // optional edge pre-processing

    HOGDescriptor hog_train;
    vector<float> descriptors_train;
    vector<Point> locations_train;

    hog_train.compute(imagesO[i], descriptors_train, Size(16, 16), Size(0, 0), locations_train); // not sure about these values

    for (int j = 0; j < (int)descriptors_train.size(); j++) // copy the full descriptor (no -1)
        HOGFeat_train.at<float>(i, j) = descriptors_train.at(j);
}

// Rows numimgopenc .. numimgopenc+numimgclose-1 hold the closed-valve descriptors
for (int j = 0; j < numimgclose; j++)
{
    resize(imagesF[j], imagesF[j], Size(width, height));

    HOGDescriptor hog_train;
    vector<float> descriptors_train;
    vector<Point> locations_train;

    hog_train.compute(imagesF[j], descriptors_train, Size(16, 16), Size(0, 0), locations_train); // not sure about these values

    for (int i = 0; i < (int)descriptors_train.size(); i++)
        HOGFeat_train.at<float>(numimgopenc + j, i) = descriptors_train.at(i); // offset so the positives are not overwritten
}
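
For reference, the hard-coded 18900 comes from the default HOGDescriptor (64x128 window, 16x16 blocks, 8x8 stride, 8x8 cells, 9 bins): one window yields 3780 values, and compute() on a 128x128 image with a 16x16 winStride evaluates 5 window positions, so 5 x 3780 = 18900. A safer option is to measure the length once instead of hard-coding it; a minimal sketch, assuming the same 128x128 resize and the same compute() arguments as in the loops above:

// Sketch: derive the feature length from one sample instead of hard-coding 18900.
HOGDescriptor hog_probe;                 // default parameters, 3780 values per 64x128 window
Mat probe;
resize(imagesO[0], probe, Size(width, height));
vector<float> probe_desc;
hog_probe.compute(probe, probe_desc, Size(16, 16), Size(0, 0));
int feat_len = (int)probe_desc.size();   // 18900 with these settings
Mat HOGFeat_train(num_total_images, feat_len, CV_32FC1);

If the resize, the winStride, or the HOGDescriptor parameters change, the row length changes with them, so querying it avoids out-of-range writes into HOGFeat_train.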

2 - Build a label matrix: 1 for open, -1 for closed.

int labels[num_total_images];
for (int i = 0; i < numimgopenc; i++)
    labels[i] = 1;                          // open valve
for (int i = 0; i < numimgclose; i++)
    labels[i + numimgopenc] = -1;           // closed valve, offset by the number of positives
Mat labelsMat(num_total_images, 1, CV_32S, labels);

3 - Train the SVM, then save and reload it.

Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));

cout << endl << "Training............." << endl;
svm->train(HOGFeat_train, ROW_SAMPLE, labelsMat);
cout << "Training finished --------------" << endl;
svm->save("svm.xml");                      // saving
svm = Algorithm::load<SVM>("svm.xml");     // loading: the static load returns a new object

4 - Finally, test on a video stream.

VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) // check if we succeeded
{
    return -1;
}

Mat test_img;
Mat HOGFeat_test(1, 18900, CV_32FC1); // must match the training descriptor length

for (;;)
{
    cap >> test_img; // grab a new frame from the camera
    if (waitKey(30) >= 0) break;
    if (test_img.empty())
    {
        std::cout << "Empty frame" << std::endl;
        continue; // skip this iteration instead of looping forever
    }

    namedWindow("Display window6", WINDOW_AUTOSIZE);
    imshow("Display window6", test_img);

    // Bring the frame to the same size and colour space as the training images
    resize(test_img, test_img, Size(width, height));
    cvtColor(test_img, test_img, COLOR_BGR2GRAY); // training images were read as grayscale

    HOGDescriptor hog_test;
    vector<float> descriptors_test;
    vector<Point> locations_test;

    hog_test.compute(test_img, descriptors_test, Size(16, 16), Size(0, 0), locations_test);

    for (int i = 0; i < (int)descriptors_test.size(); i++)
        HOGFeat_test.at<float>(0, i) = descriptors_test.at(i);

    namedWindow("Test Image", WINDOW_NORMAL);
    imshow("Test Image", test_img);

    float result = svm->predict(HOGFeat_test);

    if (result > 0)
        cout << "Open" << endl;
    else
        cout << "Closed" << endl;

    cout << "Result: " << result << endl;

    waitKey(30);
}

My problem: with this code I cannot use HOGDescriptor::detectMultiScale to search the frame for the valve, because detectMultiScale only locates positive samples of a single detector, and I don't know how to detect both states (valve open and valve closed).
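
A possible way around this (just a sketch, not something I have running): use detectMultiScale only to locate the valve, and keep the open/closed decision in a separate predict() call. That means training a second linear SVM, here called svm_detect (hypothetical name), on single-window descriptors of "valve (open or closed) vs. background", turning its weights into the single detector vector that setSVMDetector expects, and then classifying the HOG descriptor of every rectangle detectMultiScale returns with the two-class SVM trained above. Assuming frame is the full-size camera image, roughly:

// Sketch only. Assumes svm_detect is a *linear* SVM trained on descriptors of exactly
// one 128x128 window (hog_locate.winSize), i.e. "valve vs. background" - a different
// training set from the open/closed one above.
vector<float> get_svm_detector(const Ptr<SVM>& svm_lin)
{
    Mat sv = svm_lin->getSupportVectors();           // for a linear SVM: one compressed vector
    Mat alpha, svidx;
    double rho = svm_lin->getDecisionFunction(0, alpha, svidx);
    vector<float> detector(sv.cols + 1);
    for (int i = 0; i < sv.cols; i++)
        detector[i] = sv.at<float>(0, i);            // weight vector
    detector[sv.cols] = (float)-rho;                 // bias term expected by setSVMDetector
    return detector;
}

// Localisation HOG: its parameters must match the ones used to train svm_detect.
HOGDescriptor hog_locate(Size(128, 128), Size(16, 16), Size(8, 8), Size(8, 8), 9);
hog_locate.setSVMDetector(get_svm_detector(svm_detect));

vector<Rect> found;
hog_locate.detectMultiScale(frame, found);           // where are the valves?

for (size_t k = 0; k < found.size(); k++)
{
    Mat roi = frame(found[k]).clone();
    resize(roi, roi, Size(width, height));
    cvtColor(roi, roi, COLOR_BGR2GRAY);              // match the grayscale training images

    HOGDescriptor hog_state;                         // same settings as in the training loops
    vector<float> d;
    hog_state.compute(roi, d, Size(16, 16), Size(0, 0));

    float state = svm->predict(Mat(d).reshape(1, 1)); // open/closed SVM from this post
    cout << (state > 0 ? "Open" : "Closed") << " valve at x=" << found[k].x
         << ", y=" << found[k].y << endl;
}

The key constraint: setSVMDetector needs a weight vector whose length equals the descriptor of a single hog_locate window plus one, so svm_detect has to be trained on those single-window descriptors rather than on the 18900-element multi-window descriptors used above; the open/closed SVM can stay exactly as it is.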

Thank you.