Feature Matching; findObject: Concept behind MultiDetection

Hello everyone, for the feature-matching module there is a great GUI interface from IntRoLab on GitHub.
Unfortunately I am not able to understand the idea behind the multi-detection from the source code. The concept is described here:

With the outliers of the RANSAC from findHomography, another matching is performed.

Here is what I don't get about this concept: with multiple instances of the object in the scene, there are indeed many more keypoints in the scene than in the object, and the number of descriptors varies as well. But if I do a nearest-neighbor search, I always get the same number of features to track. That seems logical, since it is the purpose of this step, but doesn't it make the described concept of multi-detection impossible? Also, if I filter the descriptors (removing the ones that were already used), I can't be sure that they all belong to one object instance, can I?
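
To check whether I read the concept correctly, here is a minimal sketch of the loop I have in mind (detectAllInstances is my own hypothetical helper, not a function from find_object; it expects the NNDR-filtered matches):

    #include <opencv2/opencv.hpp>
    #include <vector>

    // My reading of the multi-detection concept: loop over the RANSAC outliers.
    // 'matches' are the NNDR-filtered matches (queryIdx = object, trainIdx = scene).
    void detectAllInstances(std::vector<cv::DMatch> matches,
                            const std::vector<cv::KeyPoint>& objKp,
                            const std::vector<cv::KeyPoint>& sceneKp,
                            int minMatches = 8)
    {
        while ((int)matches.size() >= minMatches)
        {
            std::vector<cv::Point2f> p1, p2;
            for (const cv::DMatch& m : matches)
            {
                p1.push_back(objKp[m.queryIdx].pt);
                p2.push_back(sceneKp[m.trainIdx].pt);
            }
            std::vector<uchar> inlierMask; // 1 = inlier of the current detection
            cv::Mat H = cv::findHomography(p1, p2, cv::RANSAC, 1.0, inlierMask);
            if (H.empty())
                break; // no consistent instance left
            // ... report this instance (e.g. draw its rectangle via perspectiveTransform) ...
            // Keep only the OUTLIERS: they may belong to another instance.
            std::vector<cv::DMatch> remaining;
            for (size_t i = 0; i < matches.size(); ++i)
                if (!inlierMask[i])
                    remaining.push_back(matches[i]);
            matches.swap(remaining);
        }
    }

Is that roughly the intended procedure?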

It would be nice if someone could explain the idea behind this concept in a bit more detail (in steps). Thanks a lot!


edit: I tried to implement the reduction: I removed the used keypoints of the scene, as pointed out by StevenPuttemans, but only one instance in the scene is found:

    #include <opencv2/opencv.hpp>
    #include <opencv2/xfeatures2d.hpp>
    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
    std::string inputScene = "Scene.bmp";
    std::string inputObject = "Object.bmp";

    // LOAD IMAGES (as grayscale)
    cv::Mat objectImg = cv::imread(inputObject, cv::IMREAD_GRAYSCALE);
    cv::Mat sceneImg = cv::imread(inputScene, cv::IMREAD_GRAYSCALE);
    if (objectImg.empty() || sceneImg.empty())
    {
        std::cerr << "Could not load " << inputObject << " or " << inputScene << std::endl;
        return 1;
    }

    std::vector<cv::Point2f> objCorners(4), sceneCorners(4);
    objCorners[0] = cv::Point2f( 0.f, 0.f );
    objCorners[1] = cv::Point2f( (float)objectImg.cols, 0.f );
    objCorners[2] = cv::Point2f( (float)objectImg.cols, (float)objectImg.rows );
    objCorners[3] = cv::Point2f( 0.f, (float)objectImg.rows );
    cv::Mat showResImg;
    cv::cvtColor(sceneImg, showResImg, cv::COLOR_GRAY2BGR); // colour copy so the green lines are visible

    std::vector<cv::KeyPoint> objectKeypoints;
    std::vector<cv::KeyPoint> sceneKeypoints;
    cv::Mat objectDescriptors;
    cv::Mat sceneDescriptors;
    int minHessian = 400;
    cv::Ptr<cv::FeatureDetector> detector = cv::xfeatures2d::SURF::create( minHessian );
    detector->detect(objectImg, objectKeypoints);
    detector->detect(sceneImg, sceneKeypoints);

    int ind = 0;
    do
    {
        cv::Ptr<cv::DescriptorExtractor> extractor = cv::xfeatures2d::SIFT::create();
        extractor->compute(objectImg, objectKeypoints, objectDescriptors);
        extractor->compute(sceneImg, sceneKeypoints, sceneDescriptors);

        cv::Mat results, dists;
        int k=2;

        // Create Flann KDTree index
        cv::flann::Index flannIndex(sceneDescriptors, cv::flann::KDTreeIndexParams(), cvflann::FLANN_DIST_EUCLIDEAN);

        // search (nearest neighbor)
        flannIndex.knnSearch(objectDescriptors, results, dists, k, cv::flann::SearchParams() );

        // PROCESS NEAREST NEIGHBOR RESULTS
        // Find correspondences by NNDR (Nearest Neighbor Distance Ratio)
        std::vector<cv::Point2f> mpts_1, mpts_2; // Used for homography
        std::vector<int> indexes_1, indexes_2; // Used for homography
        std::vector<uchar> outlier_mask;  // Used for homography

        for(int i = 0; i < objectDescriptors.rows; ++i)
        {
            // Check if this object descriptor matches one in the scene
            // Apply NNDR
            float nndrRatio = 0.8f;
            if( results.at<int>(i,0) >= 0 && results.at<int>(i,1) >= 0 && dists.at<float>(i,0) <= nndrRatio * dists.at<float>(i,1))
            {
                mpts_1.push_back(objectKeypoints.at(i).pt);
                indexes_1.push_back(i);

                mpts_2.push_back(sceneKeypoints.at(results.at<int>(i,0)).pt);
                indexes_2.push_back(results.at<int>(i,0));
            }
        }

        // FIND HOMOGRAPHY
        int nbMatches = 8;
        if((int)mpts_1.size() >= nbMatches)
        {
            cv::Mat H = findHomography(mpts_1,
                    mpts_2,
                    cv::RANSAC,
                    1.0,
                    outlier_mask);
            std::cout << "H: " << H << std::endl;

            // Do what you want with the homography (like showing a rectangle)
            // The "outlier_mask" contains a mask representing the inliers and outliers.
            // ...

            perspectiveTransform( objCorners, sceneCorners, H);
            //-- Draw lines between the corners (the mapped object in the scene - image_2 )
            cv::line( showResImg, sceneCorners[0], sceneCorners[1], cv::Scalar(0, 255, 0), 4 );
            cv::line( showResImg, sceneCorners[1], sceneCorners[2], cv::Scalar(0, 255, 0), 4 );
            cv::line( showResImg, sceneCorners[2], sceneCorners[3], cv::Scalar(0, 255, 0), 4 );
            cv::line( showResImg, sceneCorners[3], sceneCorners[0], cv::Scalar(0, 255, 0), 4 );

        }

        //-- Remove the scene keypoints that were used by this detection (inliers)
        for (size_t idx = 0; idx < outlier_mask.size(); idx++)
        {
            if ( outlier_mask[idx] )
            {
                int indErase = indexes_1[idx]; // indexes_1 holds the object-side index of this match
                sceneKeypoints.erase( sceneKeypoints.begin() + indErase );
            }
        }

        ind++;
    } while( ind < 3 );

    cv::imshow("Matching Result", showResImg);
    cv::waitKey();

    return 0;
    }
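
For reference, this is how I would have expected the removal step to work; a minimal sketch under my assumptions that outlier_mask marks inliers with 1 and that indexes_2 holds the scene-side indices of the matches (it would replace the erase loop above and needs <algorithm> and <functional>):

    // Sketch of the removal step under my assumptions (not verified):
    std::vector<int> toErase;
    for (size_t idx = 0; idx < outlier_mask.size(); ++idx)
        if (outlier_mask[idx])                   // 1 = inlier of the detected instance
            toErase.push_back(indexes_2[idx]);   // scene-side index of this match
    std::sort(toErase.begin(), toErase.end(), std::greater<int>());
    toErase.erase(std::unique(toErase.begin(), toErase.end()), toErase.end());
    for (int e : toErase)
        sceneKeypoints.erase(sceneKeypoints.begin() + e); // back to front keeps indices valid

With that change I would at least expect the keypoints of a second instance to survive into the next iteration, but I am not sure this is what the concept intends.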

Scene: multiLena (image attached)
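
In case someone wants to reproduce this: a scene like the one I attached can be built by simply tiling the object image. A quick sketch to generate such a test scene (file names are just my own):

    // Quick generator for a test scene containing multiple copies of the object:
    #include <opencv2/opencv.hpp>
    #include <vector>

    int main()
    {
        cv::Mat obj = cv::imread("Object.bmp", cv::IMREAD_GRAYSCALE);
        if (obj.empty())
            return 1;
        cv::Mat row, scene;
        cv::hconcat(std::vector<cv::Mat>{obj, obj}, row);   // two copies side by side
        cv::vconcat(std::vector<cv::Mat>{row, row}, scene); // stack to a 2x2 grid
        cv::imwrite("Scene.bmp", scene);
        return 0;
    }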
