Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

OpenCV Assertion failed when using perspective transform

Hey all! I am using the Aruco AR library for a project. My project is based on the following concept: I would like to determine the camera position in the world coordinate system using AR markers. The marker positions are known in the world coordinate system. If I could determine the distance of each marker relative to my camera, I could compute the world coordinates of the camera. For this purpose I would need the coordinates of at least two markers. With two markers I would still get two candidate positions for the camera, so for a unique solution I would need three detected markers with their distance estimates.

I have successfully done the marker detection, and I can also draw e.g. a cube on the markers, so the camera calibration is valid and everything works fine. I have the following problem: when I try to use the cv::projectPoints function, I get the following errors:

OpenCV Error: Assertion failed (npoints >= 0 && (depth == CV_32F || depth == CV_64F)) in projectPoints, file /home/mirind4/DiplomaMSC/OpenCV/opencv-2.4.10/modules/calib3d/src/calibration.cpp, line 3349
Exception :/home/mirind4/DiplomaMSC/OpenCV/opencv-2.4.10/modules/calib3d/src/calibration.cpp:3349: error: (-215) npoints >= 0 && (depth == CV_32F || depth == CV_64F) in function projectPoints

I tried to find a solution for this error, and I found a seemingly good one (Link to the question), but in my code I do not know how to set the data type to "CvType.CV_32FC2".

I would like to ask two questions: What would you advise for this error? Do you think my concept for the camera pose estimation is sound?

Thanks in advance!

My source code is the following: (I am using Aruco with Python, so that's why you can see the "BOOST_PYTHON_MODULE" part at the end of it.)

#include <iostream>
#include <aruco/aruco.h>
#include <aruco/cvdrawingutils.h>
#include <opencv2/highgui/highgui.hpp>
#include <fstream>
#include <sstream>
#include <boost/python.hpp>

using namespace cv;
using namespace aruco;

// --- File-scope state shared by Init(), yay() and the trackbar callback ---
string TheInputVideo;           // unused here; kept from the original aruco sample
string TheIntrinsicFile;        // unused here; calibration is read from camera.xml in Init()
float TheMarkerSize=45;         // physical marker side length (same unit as the calibration, e.g. mm)
int ThePyrDownLevel;
MarkerDetector MDetector;       // aruco detector; thresholds tuned via the GUI trackbars
VideoCapture TheVideoCapturer;  // opened in Init(), read in yay()
vector<Marker> TheMarkers;      // result of the last MDetector.detect() call
//projected points vector
std::vector<cv::Point2f> projectedPoints;   // output of cv::projectPoints in yay()

Mat TheInputImage,TheInputImageCopy;        // current frame and the copy markers are drawn on
CameraParameters TheCameraParameters;       // intrinsics + distortion loaded from camera.xml
void cvTackBarEvents(int pos,void*);        // trackbar callback (defined below)
bool readCameraParameters(string TheIntrinsicFile,CameraParameters &CP,Size size);  // declared but not defined here

pair<double,double> AvrgTime(0,0) ;//determines the average time required for detection
double ThresParam1,ThresParam2;             // detector threshold params (double form used by MDetector)
int iThresParam1,iThresParam2;              // integer mirrors bound to the GUI trackbars
int waitTime=0;                             // cv::waitKey delay; set to 10 ms in Init()


// One-time setup, exposed to Python: opens the camera, loads the
// calibration (camera.xml), and creates the GUI windows + threshold
// trackbars. Must be called before yay().
// NOTE(review): vIdx is hard-coded to camera index 1 — confirm this is
// the intended device on the target machine.
void Init(){
    int vIdx = 1;
    cout<<"Opening camera index "<<vIdx<<endl;
    TheVideoCapturer.open(vIdx);
    waitTime=10;

    // Grab one frame so the calibration can be resized to the actual frame size.
    TheVideoCapturer>>TheInputImage;

    //Camera paramteres
        TheCameraParameters.readFromXMLFile("camera.xml");
        TheCameraParameters.resize(TheInputImage.size());
        cout<<"Camera paramteres valid? : " << TheCameraParameters.isValid()<<endl<<std::flush;
    //Create gui
        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        MDetector.getThresholdParams( ThresParam1,ThresParam2);       
        MDetector.setCornerRefinementMethod(MarkerDetector::SUBPIX);
        // Trackbars operate on the int mirrors; cvTackBarEvents copies them
        // back into the double params and re-runs detection.
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
}



int yay()
{
    try{

            char key=0;
            //capture until press ESC or until the end of the video
            do{
            TheVideoCapturer.retrieve( TheInputImage);

            //Detection of markers in the image passed
                    MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);           
            //print marker info and draw the markers in image
                    TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                        TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
                        //cout<<TheMarkers[i].id<<std::flush;
                        cout<<"Tvec : "<<TheMarkers[i].Tvec<<endl;
                        cout<<"Rvec : "<<TheMarkers[i].Rvec<<endl;
                        //CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                        cv::projectPoints(TheMarkers[i],TheMarkers[i].Rvec,TheMarkers[i].Tvec,TheCameraParameters.CameraMatrix,TheCameraParameters.Distorsion,projectedPoints);
                    }

            ;

            cv::imshow("in",TheInputImageCopy);
            cv::imshow("thres",MDetector.getThresholdedImage());

            key=cv::waitKey(waitTime);//wait for key to be pressed
        }while(key!=27 && TheVideoCapturer.grab());

        cv::destroyAllWindows();

        return TheMarkers.size();


    } 
    catch (std::exception &ex){
            cout<<"Exception :"<<ex.what()<<endl;
        cv::destroyAllWindows();
        return 0;
    }
}




// Trackbar callback: clamps the integer trackbar values to valid ranges,
// copies them into the detector's double threshold params, and re-runs
// detection on the current frame so the GUI updates immediately.
//
// Bug fix: the original clamped ThresParam2 (the double) instead of
// iThresParam2 — but ThresParam2 is overwritten from iThresParam2 two
// lines later, so the "at least 1" lower bound was never enforced.
void cvTackBarEvents(int pos,void*)
{
    if (iThresParam1<3) iThresParam1=3;          // adaptive-threshold block size must be >= 3
    if (iThresParam1%2!=1) iThresParam1++;       // ...and odd
    if (iThresParam2<1) iThresParam2=1;          // clamp the value actually used below
    ThresParam1=iThresParam1;
    ThresParam2=iThresParam2;
    MDetector.setThresholdParams(ThresParam1,ThresParam2);
//recompute
    MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters);
    TheInputImage.copyTo(TheInputImageCopy);
    for (unsigned int i=0;i<TheMarkers.size();i++)  TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
    cv::imshow("in",TheInputImageCopy);
    cv::imshow("thres",MDetector.getThresholdedImage());
}






// Python bindings: builds the libTestPyAruco extension module exposing
// Init() (camera/GUI setup) and yay() (detection loop) to Python.
BOOST_PYTHON_MODULE(libTestPyAruco)
{
  boost::python::def("Init", Init);
  boost::python::def("yay", yay);
}

The "running" python file:

# Driver script for the Boost.Python module built from the C++ source
# above. Python 2 syntax ("print" used as a statement).
import libTestPyAruco

libTestPyAruco.Init()  # open camera, load calibration, create GUI
print libTestPyAruco.yay()  # run the detection loop; prints the marker count