How to create MatchesInfo properly?
I am currently trying to modify the stitcher pipeline to get something a little more robust for a specific purpose. I have only modified the feature extraction and the pairwise matching part. My code looks something close to what follows:
Ptr<detail::FeaturesFinder> features_finder_ = makePtr<detail::SurfFeaturesFinder>();
Ptr<detail::FeaturesMatcher> features_matcher_ =
makePtr<detail::BestOf2NearestMatcher>(try_use_gpu, 0.3f);
std::vector<detail::ImageFeatures> features_;
std::vector<UMat> feature_find_imgs;
(*features_finder_)(feature_find_imgs, features_);
int nmb_imgs = features_.size();
Ptr<DescriptorMatcher> matcher = BFMatcher::create(NORM_L2,false);
std::vector<detail::MatchesInfo> pairwise_matches(nmb_imgs * nmb_imgs);
for(int i=0; i<(int)features_.size()-1; ++i){
detail::MatchesInfo matchInfos[2];
// I know that the images come consecutively from a panorama therefore the following
int idx1 = i;
int idx2 = i+1;
for(int j=0; j<2; ++j){
vector<KeyPoint> & kp1 = features_[idx1].keypoints;
vector<KeyPoint> & kp2 = features_[idx2].keypoints;
Mat des1 = features_[idx1].descriptors.getMat(ACCESS_RW);
Mat des2 = features_[idx2].descriptors.getMat(ACCESS_RW);
std::vector<std::vector<DMatch> > matches;
matcher->knnMatch(des2, des1, matches, 2);
std::vector<DMatch> refined_matches;
// find refined_matches from matches
if(refined_matches.size() < 4){
swap(idx1,idx2);
continue;
}
vector<KeyPoint> kp1Refined(refined_matches.size());
vector<KeyPoint> kp2Refined(refined_matches.size());
for(int i=0; i<(int)refined_matches.size(); ++i){
kp1Refined[i] = kp1[refined_matches[i].trainIdx];
kp2Refined[i] = kp2[refined_matches[i].queryIdx];
}
Mat kp1Mat(1, refined_matches.size(), CV_32FC2);
Mat kp2Mat(1, refined_matches.size(), CV_32FC2);
for(int i=0; i<(int)refined_matches.size(); ++i){
kp1Mat.at<Vec2f>(0,i)[0] = kp1Refined[i].pt.x;
kp1Mat.at<Vec2f>(0,i)[1] = kp1Refined[i].pt.y;
kp2Mat.at<Vec2f>(0,i)[0] = kp2Refined[i].pt.x;
kp2Mat.at<Vec2f>(0,i)[1] = kp2Refined[i].pt.y;
}
Mat mask;
Mat H = findHomography(kp2Mat, kp1Mat, RANSAC, 1.0, mask);
H.convertTo(matchInfos[j].H, CV_64F);
matchInfos[j].matches = refined_matches;
matchInfos[j].src_img_idx = idx1;
matchInfos[j].dst_img_idx = idx2;
matchInfos[j].confidence = 1.0;
matchInfos[j].num_inliers = 0;
matchInfos[j].inliers_mask.clear();
for(int i=0; i<(int)refined_matches.size(); ++i){
if(mask.at<uchar>(i,0) == 1)
matchInfos[j].num_inliers++;
matchInfos[j].inliers_mask.push_back(mask.at<uchar>(i,0));
}
pairwise_matches[idx1 * nmb_imgs + idx2] = matchInfos[j];
swap(idx1,idx2);
}
}
But this does not give at all the desired result. I did not touch the remaining part of the pipeline. If I test with two images the resulting images contains both images but weirdly warped and not all stitched together. I guess I am not constructing the MatchesInfo
correctly, which the pipeline later uses to estimate the camera parameters, bundle adjustment and warping. How do I create the MatchesInfo correctly?