Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

Assertion failed (scn + 1 == m.cols) in perspectiveTransform

I am new to OpenCV. I want to do real-time object recognition using SURF, but I get this error: OpenCV Error: Assertion failed (scn + 1 == m.cols) in perspectiveTransform, file /opencv/modules/core/src/matmul.cpp, line 2299 terminate called after throwing an instance of 'cv::Exception' what(): /opencv/modules/core/src/matmul.cpp:2299: error: (-215) scn + 1 == m.cols in function perspectiveTransform

Aborted (core dumped)

Below is my code:

include <stdio.h>

include <iostream>

include <fstream>

include "opencv2/core.hpp"

include "opencv2/imgproc.hpp"

include "opencv2/features2d.hpp"

include "opencv2/highgui.hpp"

include "opencv2/calib3d.hpp"

include "opencv2/xfeatures2d.hpp"

using namespace cv; using namespace std; using namespace cv::xfeatures2d;

int framecount = 0;

int main( int argc, char** argv ) { Mat img_object = imread("arrow_left.jpg", CV_LOAD_IMAGE_GRAYSCALE);

if( !img_object.data) { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

//-- Step 1: Detect the keypoints and extract descriptors using SURF int minHessian = 400; Ptr<surf> detector = SURF::create( minHessian ); std::vector<keypoint> keypoints_object; Mat descriptors_object; detector->detectAndCompute( img_object, Mat(), keypoints_object, descriptors_object );

//create video capture object VideoCapture cap(0);

//-- Get the corners from the image_1 ( the object to be "detected" ) std::vector<point2f> obj_corners(4); obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 ); obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows ); std::vector<point2f> scene_corners(4);

//while loop for real time detection while(true) { Mat frame, image; cap.read(frame);

if(framecount < 5) { framecount++; continue; }

//converting captured frame into gray scale cvtColor(frame, image, CV_RGB2GRAY);

//extract detectors and descriptor of captured frame
std::vector<keypoint> keypoints_image; Mat descriptors_image; detector->detectAndCompute(image,Mat(), keypoints_image, descriptors_image);

//-- Step 2: Matching descriptor vectors using FLANN matcher FlannBasedMatcher matcher; std::vector<vector<dmatch> > matches; matcher.knnMatch(descriptors_object, descriptors_image, matches, 2); double max_dist = 0; double min_dist = 100;

//used to find right matches std::vector<dmatch> good_matches; for(int i = 0; i < min(descriptors_image.rows-1,(int) matches.size()); i++) { if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)) { good_matches.push_back(matches[i][0]); } }

//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist ) Mat img_matches; drawMatches( img_object, keypoints_object, frame, keypoints_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

//-- Localize the object std::vector<point2f> obj; std::vector<point2f> scene;

//3 good matches are enough to describe an object as a right match if(good_matches.size() >=3) { for( size_t i = 0; i < good_matches.size(); i++ ) { //-- Get the keypoints from the good matches obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt ); scene.push_back( keypoints_image[ good_matches[i].trainIdx ].pt ); }

Mat H; try { H = findHomography( obj, scene, CV_RANSAC ); } catch(Exception e) {}

perspectiveTransform( obj_corners, scene_corners, H);

//-- Draw lines between the corners (the mapped object in the scene - image_2 ) line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 ); line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); } //Show detected matches imshow( "Object detection", img_matches );

//clear array good_matches.clear();

if(waitKey(30) >=0) { break; }

} return 0; }

Can anyone help me?

Assertion failed (scn + 1 == m.cols) in perspectiveTransform

I am new to OpenCV. I want to do real-time object recognition using SURF, but I get this error: OpenCV Error: Assertion failed (scn + 1 == m.cols) in perspectiveTransform, file /opencv/modules/core/src/matmul.cpp, line 2299 terminate called after throwing an instance of 'cv::Exception' what(): /opencv/modules/core/src/matmul.cpp:2299: error: (-215) scn + 1 == m.cols in function perspectiveTransform

Aborted (core dumped)

Below is my code:

#include <stdio.h>
#include <iostream>
#include <fstream>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"

#include <stdio.h>
#include <iostream>
#include <fstream>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"

using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int framecount = 0;

0; int main( int argc, char** argv ) { Mat img_object = imread("arrow_left.jpg", CV_LOAD_IMAGE_GRAYSCALE);

CV_LOAD_IMAGE_GRAYSCALE); if( !img_object.data) { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

} //-- Step 1: Detect the keypoints and extract descriptors using SURF int minHessian = 400; Ptr<surf> Ptr<SURF> detector = SURF::create( minHessian ); std::vector<keypoint> std::vector<KeyPoint> keypoints_object; Mat descriptors_object; detector->detectAndCompute( img_object, Mat(), keypoints_object, descriptors_object );

); //create video capture object VideoCapture cap(0);

cap(0); //-- Get the corners from the image_1 ( the object to be "detected" ) std::vector<point2f> std::vector<Point2f> obj_corners(4); obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 ); obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows ); std::vector<point2f> scene_corners(4);

std::vector<Point2f> scene_corners(4); //while loop for real time detection while(true) { Mat frame, image; cap.read(frame);

cap.read(frame); if(framecount < 5) { framecount++; continue; }

} //converting captured frame into gray scale cvtColor(frame, image, CV_RGB2GRAY);

CV_RGB2GRAY); //extract detectors and descriptor of captured frame
std::vector<keypoint>
std::vector<KeyPoint> keypoints_image; Mat descriptors_image; detector->detectAndCompute(image,Mat(), keypoints_image, descriptors_image);

descriptors_image); //-- Step 2: Matching descriptor vectors using FLANN matcher FlannBasedMatcher matcher; std::vector<vector<dmatch> std::vector<vector<DMatch> > matches; matcher.knnMatch(descriptors_object, descriptors_image, matches, 2); double max_dist = 0; double min_dist = 100;

100; //used to find right matches std::vector<dmatch> std::vector<DMatch> good_matches; for(int i = 0; i < min(descriptors_image.rows-1,(int) matches.size()); i++) { if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)) { good_matches.push_back(matches[i][0]); } }

//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist ) Mat img_matches; drawMatches( img_object, keypoints_object, frame, keypoints_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

); //-- Localize the object std::vector<point2f> std::vector<Point2f> obj; std::vector<point2f> scene;

std::vector<Point2f> scene; //3 good matches are enough to describe an object as a right match if(good_matches.size() >=3) { for( size_t i = 0; i < good_matches.size(); i++ ) { //-- Get the keypoints from the good matches obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt ); scene.push_back( keypoints_image[ good_matches[i].trainIdx ].pt ); }

} Mat H; try { H = findHomography( obj, scene, CV_RANSAC ); } catch(Exception e) {}

{} perspectiveTransform( obj_corners, scene_corners, H);

H); //-- Draw lines between the corners (the mapped object in the scene - image_2 ) line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 ); line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 ); } //Show detected matches imshow( "Object detection", img_matches );

); //clear array good_matches.clear();

good_matches.clear(); if(waitKey(30) >=0) { break; }

} } return 0; }

}

Can anyone help me?