I want to stitch a third image with findHomography

I have a problem when I use findHomography. The first time it works (stitching image1 and image2):

C:\fakepath\Screenshot.png
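
To make the question clearer, this is roughly what the first stitch does, condensed into one helper (a minimal sketch only; stitchPair is just a name I use for this post, the 3*min_dist filtering is left out for brevity, and the inputs are assumed to be grayscale cv::Mat as in my full code below):

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"

// Warp rightImg into leftImg's frame with a homography, then paste leftImg on the left side.
cv::Mat stitchPair(const cv::Mat& leftImg, const cv::Mat& rightImg)
{
    cv::SurfFeatureDetector surf(2300);          // same threshold as in my full code
    cv::SurfDescriptorExtractor surfDesc;

    std::vector<cv::KeyPoint> leftKp, rightKp;
    cv::Mat leftDesc, rightDesc;
    surf.detect(leftImg, leftKp);
    surfDesc.compute(leftImg, leftKp, leftDesc);
    surf.detect(rightImg, rightKp);
    surfDesc.compute(rightImg, rightKp, rightDesc);

    // query = right image, train = left image
    cv::FlannBasedMatcher matcher;
    std::vector<cv::DMatch> matches;
    matcher.match(rightDesc, leftDesc, matches);

    std::vector<cv::Point2f> rightPts, leftPts;
    for (size_t k = 0; k < matches.size(); k++) {
        rightPts.push_back(rightKp[matches[k].queryIdx].pt);
        leftPts.push_back(leftKp[matches[k].trainIdx].pt);
    }

    // Homography that maps right-image points into the left image's coordinates
    cv::Mat H = cv::findHomography(rightPts, leftPts, CV_RANSAC, 1.0);

    // Warp the right image onto a double-width canvas, then paste the left image on top
    cv::Mat pano;
    cv::warpPerspective(rightImg, pano, H,
                        cv::Size(2 * rightImg.cols, rightImg.rows));
    leftImg.copyTo(pano(cv::Rect(0, 0, leftImg.cols, leftImg.rows)));
    return pano;
}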

But when I warpPerspective the third image with the new homography, it shows the result in this picture:

C:\fakepath\Screenshot2.png
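
What I am trying for the third image is essentially the same step again, with the previous result as the new left image (still using the stitchPair sketch above, and assuming image, image1 and image2 are loaded as in the full code below):

// First pair: this works (Screenshot.png)
cv::Mat result = stitchPair(image, image1);

// Third image: reuse the stitched pair as the new left image
// -> this is where the output breaks (Screenshot2.png)
cv::Mat panorama = stitchPair(result, image2);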

My source code:

#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

void readme();

int main( int argc, char** argv )
{

if( argc != 5 ){ readme(); return -1; }

Mat image = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat image1 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE);
Mat image2 = imread( argv[3], CV_LOAD_IMAGE_GRAYSCALE);
Mat featureImage = imread( argv[4], CV_LOAD_IMAGE_GRAYSCALE);
cv::Mat result;

// Construct the SURF feature detector object
cv::SurfFeatureDetector surf(2300); // threshold
// Construction of the SURF descriptor extractor
cv::SurfDescriptorExtractor surfDesc;

Mat right_Image;
Mat left_Image;
std::vector<cv::KeyPoint> right_keypoints, left_keypoints;
cv::Mat left_descriptors, right_descriptors;

FlannBasedMatcher matcher;
std::vector<DMatch> matches;
std::vector< DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
Mat H;

for(int i=0;i<2;i++){

    if(i==0){
        right_Image = image1;
        left_Image = image;
    }else if(i>=1){
        right_Image = image2;
        left_Image = result;
        result.release();
    }else{
        exit(-1);   
    }

    ///////////////////////////////// right image ////////////////////////////////////

    // Detect the SURF features
    surf.detect(right_Image,right_keypoints);
    std::cout << "right_keypoints : " << right_keypoints.size() << '\n';
    surfDesc.compute(right_Image,right_keypoints,right_descriptors);

    // Draw the keypoints with scale and orientation information
    cv::drawKeypoints(right_Image,              // original image
        right_keypoints,                    // vector of keypoints
        featureImage,                   // the resulting image
        cv::Scalar(255,255,255),            // color of the points
        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);     //flag

////////////////////////////// end right image //////////////////////////////////

///////////////////////////////// left image ////////////////////////////////////

    // Detect the SURF features
    surf.detect(left_Image,left_keypoints);
    std::cout << "left_keypoints : " << left_keypoints.size() << '\n';

    surfDesc.compute(left_Image,left_keypoints,left_descriptors);
    // Draw the keypoints with scale and orientation information
    cv::drawKeypoints(left_Image,               // original image
        left_keypoints,                 // vector of keypoints
        featureImage,                   // the resulting image
        cv::Scalar(255,255,255),            // color of the points
        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);     //flag

////////////////////////////// end left image //////////////////////////////////

/////////////////////////// Match right & left images ////////////////////////////
//-- Step 3: Matching descriptor vectors using the FLANN matcher
    matcher.match(right_descriptors, left_descriptors, matches);
    std::cout << "matches : " << matches.size() << '\n';

    double max_dist = 0; double min_dist = 100;

//-- Quick calculation of max and min distances between keypoints
    for( int j = 0; j < right_descriptors.rows; j++ )
    { 
        double dist = matches[j].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist image & image1 : %f \n", max_dist );
    printf("-- Min dist image & image1 : %f \n", min_dist );

//-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist)

    for(int j = 0; j < right_descriptors.rows; j++)
    { 
        if(matches[j].distance <= 3*min_dist)
            { 
            good_matches.push_back(matches[j]); 
        }
    }

    std::cout << "good_matches : " << good_matches.size() << '\n';

    Mat img_matches;
    drawMatches( right_Image, right_keypoints, left_Image, left_keypoints,
               good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
               vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object

    for( size_t j = 0; j < good_matches.size(); j++ )
    {
    //-- Get the keypoints from the good matches
        obj.push_back( right_keypoints[ good_matches[j].queryIdx ].pt );
        scene.push_back( left_keypoints[ good_matches[j].trainIdx ].pt );
    }

    H = findHomography( obj, scene, CV_RANSAC, 1. );
    cout << "H = "<< endl << " "  << H << endl << endl;

    // Warp the right image into the left image's coordinate frame
    cv::warpPerspective(right_Image,            // input image
        result,                 // output image
        H,                  // homography
        cv::Size(2*right_Image.cols, right_Image.rows)); 

    if(i == 1){
        imshow( "Good Matches & Object detection", result);
    }

    // Copy the left image onto the left part of the stitched result
    cv::Mat half(result,cv::Rect(0,0,left_Image.cols,left_Image.rows));
    left_Image.copyTo(half); // paste the left image into the result ROI

    //-- Reset the containers for the next iteration

    matches.clear();
    good_matches.clear();
    obj.clear();
    scene.clear();
}   


    waitKey(0);
    return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./BallMatch <image> <image1> <image2> <featureImage>" << std::endl; }

Thanks! And sorry about my bad English.
