Custom HOG not detecting features

I have an XML file containing an SVM trained on HOG features from 14,000 images in total: 9,000 negative samples and 5,000 positive. I trained the SVM using OpenCV's Java bindings, but had to switch to C++ to extract the support vectors from the XML file, since the Java bindings don't expose them. When I load the XML file in my C++ program and try to detect an airplane, I get no found locations at all.

To make it ridiculously simple, I also trained an SVM on about 15 negative images and 5 positive ones, then ran detection on one of the images it was trained with, and I still get no rectangles drawn on the screen because no detections are returned. If anyone can help with this, I'd appreciate it. Below is the Java code I used to train the SVM and the C++ code I use to detect features in a given input image.
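To narrow down whether the problem is the classifier itself or the sliding-window detection, here is the kind of sanity check I have in mind (a minimal sketch; the file paths are placeholders and the HOG parameters mirror the code below). It loads the saved SVM, computes the descriptor of a single positive training image resized to the window size, and calls predict directly, so the SVM is tested without detectMultiScale in the loop:

    // Sketch: test the loaded SVM directly on one positive training image,
    // bypassing detectMultiScale. Paths are placeholders; parameters match the code below.
    #include <opencv2/core.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/objdetect.hpp>
    #include <opencv2/ml.hpp>
    #include <iostream>
    #include <vector>

    using namespace cv;
    using namespace cv::ml;
    using namespace std;

    int main()
    {
        // same HOG parameters as in the training and detection code
        HOGDescriptor hog;
        hog.winSize = Size(256, 128);
        hog.blockSize = Size(128, 64);
        hog.blockStride = Size(1, 1);
        hog.cellSize = Size(8, 8);
        hog.nbins = 9;

        // placeholder paths
        Ptr<SVM> svm = StatModel::load<SVM>("svm.xml");
        Mat img = imread("positive_training_image.jpg", IMREAD_GRAYSCALE);

        // resize to exactly one detection window so compute() returns a single descriptor
        resize(img, img, hog.winSize);

        vector<float> desc;
        hog.compute(img, desc);

        // feed the descriptor to the SVM as a single row sample
        Mat sample(1, (int)desc.size(), CV_32F, desc.data());
        cout << "predicted label: " << svm->predict(sample) << endl;

        return 0;
    }

If this already predicts -1 for a positive training image, the problem is in the training or in how the model is saved and loaded; if it predicts 1, the problem is more likely in getSvmDetector or in the detectMultiScale setup.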

Training...

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.highgui.Highgui;
import org.opencv.objdetect.HOGDescriptor;
import org.opencv.ml.CvSVM;
import org.opencv.ml.CvSVMParams;

public class Detector {

    public static void main(String[] args) {
        System.loadLibrary( Core.NATIVE_LIBRARY_NAME );


        // the locations of the training data
        File[] trainingSetA = new File ("C:/Users/danekstein/Desktop/positiveSample/").listFiles();
        File[] negative = new File ("C:/Users/danekstein/Desktop/negativeSample/").listFiles();


        List<File[]> trainingSets = new ArrayList<File[]>();

        trainingSets.add(trainingSetA);
        trainingSets.add(negative);

        // the amount of examples in each set
        int posACount = trainingSetA.length;
        int negCount = negative.length;

        // all labels are initialized to -1 (negative)
        Mat aLabels = new Mat(posACount + negCount, 1, CvType.CV_32FC1, new Scalar(-1));

        // we overwrite the positive portion of the matrix with 1, Positive
        aLabels.rowRange(0,posACount).setTo(new Scalar(1));

        //creating arrays for our feature vectors
        Mat[] featuresA = new Mat[posACount];
        Mat[] featuresN = new Mat[negCount];

        for(File[] set : trainingSets){

            int count = 0;

            for(File image : set){

                // read in the image as a matrix
                Mat img = Highgui.imread(image.getAbsolutePath(), 0);

                // create a new descriptor
                HOGDescriptor descriptor = new HOGDescriptor(new Size(256,128),new Size(128,64), new Size(1,1), new Size(8,8), 9);

                // initialize a vector in which the features will be placed
                MatOfFloat descriptors = new MatOfFloat();

                // compute the feature vector and store it in 'descriptors'
                descriptor.compute(img, descriptors);

                if(set.equals(trainingSetA)) featuresA[count] = descriptors.t();
                if(set.equals(negative)) featuresN[count] = descriptors.t();

                count++;
                System.out.println(count);
            }
        }

        System.out.println("Adding features to training matrix...");


        Mat trainingMatA = new Mat(posACount + negCount, featuresA[0].cols(), featuresA[0].type());

        for(int i=0;i<posACount;i++){
            featuresA[i].copyTo(trainingMatA.rowRange(i,i+1));
        }
        for(int i=0;i<negCount;i++){
            featuresN[i].copyTo(trainingMatA.rowRange(i+posACount,i+posACount+1));
        }

        System.out.println("Added to Matrix");

        System.out.println("Training model...");

        CvSVM svm = new CvSVM();
        CvSVMParams params = new CvSVMParams();
        params.set_kernel_type(CvSVM.LINEAR);
        params.set_svm_type(CvSVM.C_SVC);
        params.set_C(10);

        svm.train_auto(trainingMatA, aLabels, new Mat(), new Mat(), params);
        svm.save("C:/Users/danekstein/Desktop/svmFullAutoA.xml");

    }

}

Detecting...

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#include <opencv2/objdetect.hpp>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>

#include <time.h>

using namespace cv;
using namespace cv::ml;
using namespace std;

void getSvmDetector(Ptr<SVM> & svm, vector < float > & detector);
void draw_locations(Mat & img, const vector < Rect > & locations, const Scalar & color);

int main(int, char**)
{

    Scalar bound(0, 255, 0);
    Mat draw;
    Ptr<SVM> svm;
    HOGDescriptor hog;
    hog.winSize = Size(256, 128);
    hog.blockSize = Size(128, 64);
    hog.blockStride = Size(1, 1);
    hog.cellSize = Size(8, 8);
    hog.nbins = 9;

    // locations where a plane is detected
    vector< Rect > locations;

    // loading the trained svm
    svm = StatModel::load<SVM>("C:/Users/danekstein/Desktop/svmFullAutoA.xml");

    // convert the trained SVM into a single detector vector for the HOG
    vector< float > hog_detector;

    getSvmDetector(svm, hog_detector);

    // compare detector and descriptor sizes
    cout << "hog_detector.size():     " << hog_detector.size() << '\n'
         << "hog.getDescriptorSize(): " << hog.getDescriptorSize() << '\n'
         << "hog.checkDetectorSize(): " << hog.checkDetectorSize() << endl;

    // set the detector
    hog.setSVMDetector(hog_detector);

    Mat image = imread("C:/Users/danekstein/Desktop/tt.jpg");
    if (image.empty()) {
        cout << "could not read the test image" << endl;
        return -1;
    }

    locations.clear();

    hog.detectMultiScale(image, locations);

    // print the width and height of each detected location
    for (vector<Rect>::const_iterator i = locations.begin(); i != locations.end(); ++i) {
        Rect r = *i;
        Size s = r.size();
        int width = s.width;
        int height = s.height;
        cout << width << ' ' << height << endl;
    }

    draw = image.clone();
    draw_locations(draw, locations, bound);

    imshow("Image", draw);
    waitKey(0);
    destroyAllWindows();

    return 0;
}

void getSvmDetector(Ptr<SVM> & svm, vector< float>  & detector) {
    // grab the support vectors... yay!
    Mat sv = svm->getSupportVectors();
    const int sv_total = sv.rows;

    // get the decision function
    Mat alpha, svidx;
    double rho = svm->getDecisionFunction(0, alpha, svidx);

    CV_Assert(alpha.total() == 1 && svidx.total() == 1 && sv_total == 1);
    CV_Assert((alpha.type() == CV_64F && alpha.at<double>(0) == 1.) ||
        (alpha.type() == CV_32F && alpha.at<float>(0) == 1.f));
    CV_Assert(sv.type() == CV_32F);
    detector.clear();

    detector.resize(sv.cols + 1);
    memcpy(&detector[0], sv.ptr(), sv.cols * sizeof(detector[0]));
    detector[sv.cols] = (float)-rho;
}

void draw_locations(Mat & img, const vector < Rect > & locations, const Scalar & color) {
    if (!locations.empty()) {
        vector< Rect >::const_iterator loc = locations.begin();
        vector< Rect >::const_iterator end = locations.end();
        for (; loc != end; ++loc) {
            rectangle(img, *loc, color, 2);
        }
    }
}
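
For reference, this is my understanding of the size bookkeeping that setSVMDetector expects (a rough sketch of the arithmetic, not copied from the OpenCV sources): the detector vector should contain one weight per descriptor element, plus one trailing element for the bias -rho as appended in getSvmDetector above. With the parameters used here, that works out as follows:

    // Sketch: expected HOG descriptor length for the parameters above,
    // computed by hand and compared with what HOGDescriptor reports.
    #include <opencv2/objdetect.hpp>
    #include <iostream>

    using namespace cv;
    using namespace std;

    int main()
    {
        HOGDescriptor hog;
        hog.winSize = Size(256, 128);
        hog.blockSize = Size(128, 64);
        hog.blockStride = Size(1, 1);
        hog.cellSize = Size(8, 8);
        hog.nbins = 9;

        // number of block positions inside one window, stepping by blockStride
        int blocksX = (hog.winSize.width  - hog.blockSize.width)  / hog.blockStride.width  + 1;
        int blocksY = (hog.winSize.height - hog.blockSize.height) / hog.blockStride.height + 1;

        // histogram bins per block = cells per block * bins per cell
        int binsPerBlock = (hog.blockSize.width  / hog.cellSize.width) *
                           (hog.blockSize.height / hog.cellSize.height) * hog.nbins;

        cout << "by hand:                    " << (long long)blocksX * blocksY * binsPerBlock << '\n'
             << "hog.getDescriptorSize():    " << hog.getDescriptorSize() << '\n'
             << "detector (weights + bias):  " << hog.getDescriptorSize() + 1 << endl;

        return 0;
    }

With a blockStride of (1, 1) this comes out to roughly 9.6 million floats per window, so both the training matrix and every call to detectMultiScale end up very large; I mention it mainly because it makes the size comparison printed in the detection code easier to interpret.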