GMS matcher doesn't write to output vector

Hi there!

I have two databases containing image pairs: for each image in the first directory, I try to find the corresponding image by iterating over the second directory and comparing local feature vectors. I want to use GMS (OpenCV docs) for this and already have a working version in Python, which looks like this:

import cv2
import numpy as np
import progressbar as pb  # the progressbar2 package
from timeit import default_timer as timer


def compute_gms(query_path, train_path, nfeatures):
    query_files = sorted([x for x in query_path.iterdir() if x.is_file()], key=lambda x: int(x.name.split('_')[-1].split('.')[0]))
    train_files = sorted([x for x in train_path.iterdir() if x.is_file()], key=lambda x: int(x.name.split('_')[-1].split('.')[0]))

    orb = cv2.ORB_create(nfeatures)

    query_kpts = []
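    # Note: this fixed-size layout assumes ORB returns exactly nfeatures
    # descriptors (32 bytes each) for every image.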
    query_descs = np.empty((nfeatures, 32, len(query_files)), dtype=np.uint8)

    t1 = timer()

    for i, q in enumerate(query_files):
        img = cv2.imread(str(q), 0)
        img = cv2.equalizeHist(img)
        kpts, descs = orb.detectAndCompute(img, None)  # Use background subtraction to remove further outliers!
        query_kpts.append(kpts)
        query_descs[...,i] = descs

    t2 = timer()
    print(f"Computing query-descs took {round(t2-t1)} seconds.")

    train_kpts = []
    train_descs = np.empty((nfeatures, 32, len(train_files)), dtype=np.uint8)

    t3 = timer()

    for i, t in enumerate(train_files):
        img = cv2.imread(str(t), 0)
        img = cv2.equalizeHist(img)
        kpts, descs = orb.detectAndCompute(img, None)
        train_kpts.append(kpts)
        train_descs[...,i] = descs

    t4 = timer()
    print(f"Computing train-descs took {round(t4-t3)} seconds.")

    scores = np.empty((len(query_files), len(train_files)))
    img_shape = cv2.imread(str(query_files[0]), 0).shape
    matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)

    for i in pb.progressbar(range(scores.shape[0])):
        for j in range(scores.shape[1]):
            matches = matcher.match(query_descs[:,:,i], train_descs[:,:,j])
            matches_gms = cv2.xfeatures2d.matchGMS(img_shape, img_shape, query_kpts[i], train_kpts[j], matches, withRotation=True)
            ratio = len(matches_gms) / nfeatures
            scores[i, j] = ratio

    return scores

Basically, you only have to provide the paths to the two databases (as pathlib.Path objects). I tried to replicate the above in C++. Everything works, except that matchGMS doesn't write to gmsMatches! I used the sample on GitHub as a reference when writing the function, and to my understanding I have done the same thing they do in their code; however, gmsMatches remains empty.

// Headers needed to compile this (std::filesystem requires C++17):
#include <algorithm>
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace cv;

Mat computeGms(const std::string &queryPath, const std::string &trainPath, const unsigned int _nfeatures=500) {
    // Creating ORB-object
    Ptr<Feature2D> orb {ORB::create(_nfeatures)};

    // Compute descriptors and keypoints for query-data
    std::vector<Mat> queryDescriptorsVec;
    std::vector<std::vector<KeyPoint>> queryKpts;

    auto queryDirIter = std::filesystem::directory_iterator(queryPath);
    int queryFileCount = std::count_if(begin(queryDirIter), end(queryDirIter),
                                       [](auto& entry) { return entry.is_regular_file(); });
    queryDescriptorsVec.reserve(queryFileCount);
    queryKpts.reserve(queryFileCount);

    // Declaring needed variables
    Mat descriptors;
    std::vector<KeyPoint> kpts;

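    // Note: directory_iterator yields entries in an unspecified order, here
    // and in the train loop below (the Python version sorts its files by index).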
    for (const auto &entry : std::filesystem::directory_iterator(queryPath)) {
        Mat img {imread(entry.path(), IMREAD_GRAYSCALE)};
        equalizeHist(img, img);
        orb->detectAndCompute(img, noArray(), kpts, descriptors);
        queryDescriptorsVec.push_back(descriptors.clone());
        queryKpts.push_back(kpts);
    }

    std::cout << "Computed query-data\n";

    // Compute descriptors and keypoints for train-data
    std::vector<Mat> trainDescriptorsVec;
    std::vector<std::vector<KeyPoint>> trainKpts;

    auto trainDirIter = std::filesystem::directory_iterator(trainPath);
    int trainFileCount = std::count_if(begin(trainDirIter), end(trainDirIter),
                                       [](auto& entry) { return entry.is_regular_file();});

    trainDescriptorsVec.reserve(trainFileCount);
    trainKpts.reserve(trainFileCount);

    for (const auto &entry : std::filesystem::directory_iterator(trainPath)) {
        Mat img {imread(entry.path(), IMREAD_GRAYSCALE)};
        equalizeHist(img, img);
        orb->detectAndCompute(img, noArray(), kpts, descriptors);
        trainDescriptorsVec.push_back(descriptors.clone());
        trainKpts.push_back(kpts);
    }

    std::cout << "Computed train-data\n";

    // Compute matches
    // Zero-initialized CV_32F score matrix; the type must be given explicitly
    // so the at<float>() accesses below are valid.
    Mat scores(static_cast<int>(queryDescriptorsVec.size()),
               static_cast<int>(trainDescriptorsVec.size()), CV_32F, Scalar(0.0f));
    Ptr<DescriptorMatcher> matcher {BFMatcher::create(NORM_HAMMING)};
    float ratio, max, maxes{0};
    // All images have the same size, so take the dimensions of the first
    // file the directory iterator points to.
    auto imsize = imread(std::filesystem::directory_iterator(queryPath)->path(), IMREAD_GRAYSCALE).size();

    for (size_t i = 0; i < queryDescriptorsVec.size(); ++i) {
        max = 0;
        for (size_t j = 0; j < trainDescriptorsVec.size(); ++j) {
            std::vector<DMatch> allMatches, gmsMatches;
            matcher->match(queryDescriptorsVec[i], trainDescriptorsVec[j], allMatches);
            xfeatures2d::matchGMS(imsize, imsize, queryKpts[i], trainKpts[j], allMatches, gmsMatches, true);
            // Cast to float first; size_t / unsigned int is integer division.
            ratio = static_cast<float>(gmsMatches.size()) / _nfeatures;
            scores.at<float>(i,j) = ratio;
            if (ratio > max)
                max = ratio;
        }
        maxes += max;
        std::cout << i << "\tmean: " << maxes/(i+1) << "\n";
    }
    return scores;
}
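
For completeness, here is roughly how I call the function; the directory names are placeholders for my two databases, and this assumes the function above is in scope:

int main() {
    Mat scores {computeGms("data/query", "data/train", 500)};

    // Quick plausibility check: print the best score in the matrix.
    double minVal {0}, maxVal {0};
    minMaxLoc(scores, &minVal, &maxVal);
    std::cout << "best ratio: " << maxVal << "\n";
    return 0;
}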

What have I done wrong here? I know for a fact that GMS finds matches, as I have already verified those outcomes via the Python version.
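
To make the failing step easy to reproduce, here is the inner loop reduced to a standalone two-image program (the file names are placeholders for one image pair from my data):

// Minimal sanity check: does matchGMS fill gmsMatches for a single pair?
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

using namespace cv;

int main() {
    Mat img1 {imread("query_0.png", IMREAD_GRAYSCALE)};
    Mat img2 {imread("train_0.png", IMREAD_GRAYSCALE)};

    Ptr<Feature2D> orb {ORB::create(500)};
    std::vector<KeyPoint> kpts1, kpts2;
    Mat desc1, desc2;
    orb->detectAndCompute(img1, noArray(), kpts1, desc1);
    orb->detectAndCompute(img2, noArray(), kpts2, desc2);

    Ptr<DescriptorMatcher> matcher {BFMatcher::create(NORM_HAMMING)};
    std::vector<DMatch> allMatches, gmsMatches;
    matcher->match(desc1, desc2, allMatches);
    xfeatures2d::matchGMS(img1.size(), img2.size(), kpts1, kpts2,
                          allMatches, gmsMatches, true);

    std::cout << allMatches.size() << " brute-force matches, "
              << gmsMatches.size() << " GMS matches\n";
    return 0;
}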

I installed the Python version via conda install -c conda-forge opencv and built the C++ version from the master branch (4.5.0-pre). I'm running Arch Linux with kernel 5.8.13.
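
If the exact C++ build matters, this is how I check it (CV_VERSION is the compile-time header version, cv::getVersionString() the version of the library actually linked):

#include <iostream>
#include <opencv2/core.hpp>

int main() {
    std::cout << CV_VERSION << "\n";              // header (compile-time) version
    std::cout << cv::getVersionString() << "\n";  // linked library version
    return 0;
}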

I will happily provide more info if necessary.