Hello everyone, I'm new to OpenCV, so it could be that it's just an understanding problem with the estimateRigidTransform function:
In the following code I find the contours of two rigidly translated objects in img1 and img2, but estimateRigidTransform does not seem to work the way I thought it would. It would be nice if someone had an idea why the Mat R stays empty. Thank you!
#include <algorithm>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/tracking.hpp>
// Samples n points from the contours of a binary image.
// Adapted from https://github.com/opencv/opencv/blob/master/samples/cpp/shape_example.cpp
//
// @param image  single-channel image passed to cv::findContours
//               (presumably already binarized -- TODO confirm with caller).
// @param n      number of points to return (default 300).
// @return       n contour points in random order; if the contours yield fewer
//               than n points they are repeated cyclically first. Returns an
//               empty vector when the image contains no contours at all.
static std::vector<cv::Point> sampleContour( const cv::Mat& image, int n=300 )
{
    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Point> all_points;
    cv::findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    // Flatten every contour into one point list.
    for (const auto& contour : contours)
    {
        all_points.insert(all_points.end(), contour.begin(), contour.end());
    }
    // Bug fix: the original padded the list by indexing all_points[dummy++]
    // even when all_points was empty, which is undefined behavior for an
    // image with no contours. Bail out explicitly instead.
    if (all_points.empty())
    {
        return all_points;
    }
    // In case the actual number of points is less than n, repeat points
    // cyclically until there are at least n of them.
    int dummy=0;
    for (int add=(int)all_points.size(); add<n; add++)
    {
        all_points.push_back(all_points[dummy++]);
    }
    // Uniform sampling: shuffle, then keep the first n points.
    // std::random_shuffle was deprecated in C++14 and removed in C++17;
    // std::shuffle with an explicit engine is the replacement.
    std::mt19937 rng(std::random_device{}());
    std::shuffle(all_points.begin(), all_points.end(), rng);
    all_points.resize(n);
    return all_points;
}
int main(){
// image reading
cv::Mat templateImage = cv::imread("1.jpg", cv::IMREAD_GRAYSCALE);
cv::Mat queryImage = cv::imread("2.jpg", cv::IMREAD_GRAYSCALE);
// contour extraction
std::vector<cv::Point> queryPoints, templatePoints;
queryPoints = sampleContour(queryImage);
templatePoints = sampleContour(templateImage);
// cast to vector<point2f> https://stackoverflow.com/questions/7386210/convert-opencv-2-vectorpoint2i-to-vectorpoint2f
std::vector<cv::Point2f> queryPoints2f, templatePoints2f;
cv::Mat(queryPoints).convertTo(queryPoints2f, cv::Mat(queryPoints2f).type());
cv::Mat(templatePoints).convertTo(templatePoints2f, cv::Mat(templatePoints2f).type());
cv::Mat R = cv::estimateRigidTransform(templatePoints2f,queryPoints2f,false);
std::cout <<"R:" << R << std::endl; // R -> empty
/*
* Solution from https://stackoverflow.com/questions/23373077/using-estimaterigidtransform-instead-of-findhomography
* let the program crash
*
cv::Mat H = cv::Mat(3,3,R.type());
H.at<double>(0,0) = R.at<double>(0,0);
H.at<double>(0,1) = R.at<double>(0,1);
H.at<double>(0,2) = R.at<double>(0,2);
H.at<double>(1,0) = R.at<double>(1,0);
H.at<double>(1,1) = R.at<double>(1,1);
H.at<double>(1,2) = R.at<double>(1,2);
H.at<double>(2,0) = 0.0;
H.at<double>(2,1) = 0.0;
H.at<double>(2,2) = 1.0;
std::vector<cv::Point2f> result;
cv::perspectiveTransform(templatePoints2f,result,H);
for(unsigned int i=0; i<result.size(); ++i)
std::cout << result[i] << std::endl;
*/
return 0;
}