
findEssentialMat gives wrong results on Android

Hi, I am trying to create a visual odometry app for Android using the NDK. I detect features with FAST, track them with KLT (calcOpticalFlowPyrLK), and then call findEssentialMat to compute the essential matrix. However, it is giving erroneous results. Here is some debug output:

    04-17 23:23:28.313 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: Essential Matrix = 
    04-17 23:23:28.313 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: -0.000000
    04-17 23:23:28.313 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: -1.655305
    04-17 23:23:28.313 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: 29303136256.000000
    04-17 23:23:28.313 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: -182220068263779929277612730010501644288.000000
    04-17 23:23:28.313 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: 1.771581
    04-17 23:23:28.314 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: 0.000000
    04-17 23:23:28.314 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: -598.520691
    04-17 23:23:28.314 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: 1.350371
    04-17 23:23:28.314 2630-2903/com.example.shaswat.testopencv D/OCVSample::SDK: 152428905045575845543936.000000

The essential matrix values are printed column-wise, one element per log line.
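For reference, a minimal sketch of how the matrix can be dumped column-wise to logcat. One detail worth flagging: findEssentialMat returns a CV_64F matrix, so the elements have to be read with at&lt;double&gt;; reading a double matrix with at&lt;float&gt; produces garbage values.

    // Sketch: type-correct column-wise dump of the essential matrix
    __android_log_print(ANDROID_LOG_DEBUG, "OCVSample::SDK", "Essential Matrix = ");
    for (int col = 0; col < E.cols; ++col)
        for (int row = 0; row < E.rows; ++row)
            __android_log_print(ANDROID_LOG_DEBUG, "OCVSample::SDK",
                                "%f", E.at<double>(row, col));   // E is CV_64F (3x3)

My code is as follows. Java part, onCameraFrame():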

    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
        // inputFrame.rgba() returns a buffer that is reused for every frame,
        // so anything kept across frames has to be cloned
        this.mCurr = inputFrame.rgba();

        if (this.firstRun == 1) {
            this.mPrev = this.mCurr.clone();
            this.mDisplay = this.mCurr.clone();
            this.firstRun = 0;

            // accumulated rotation and translation
            // (note: recoverPose on the native side outputs CV_64F)
            this.mRf = Mat.zeros(3, 3, CvType.CV_32F);
            this.mTf = Mat.zeros(3, 1, CvType.CV_32F);
        }

        // process only every second frame
        this.controlFrameRate++;
        if (this.controlFrameRate < 2)
            return null;
        this.controlFrameRate = 0;

        if (this.firstRun == 0) {
            this.mDisplay = this.mPrev;

            convertNativeGray(mCurr.getNativeObjAddr(), mPrev.getNativeObjAddr(),
                    mDisplay.getNativeObjAddr(), mRf.getNativeObjAddr(),
                    mTf.getNativeObjAddr());

            this.mPrev = this.mCurr.clone();   // keep a copy, not the reused buffer
        }

        return mDisplay;
    }

Native part:

    #include <jni.h>
    #include <android/log.h>
    #include <opencv2/opencv.hpp>

    using namespace cv;

    extern "C"
    JNIEXPORT jint JNICALL Java_com_example_shaswat_testopencv_MainActivity_convertNativeGray(
            JNIEnv*, jobject, jlong addrCurr, jlong addrPrev, jlong addrDisplay,
            jlong addrRF, jlong addrTF) {

        __android_log_print(ANDROID_LOG_DEBUG, "OCVSample::SDK", "In Native Code");

        Mat& mCurr = *(Mat*)addrCurr;
        Mat& mPrev = *(Mat*)addrPrev;
        Mat& mDisplay = *(Mat*)addrDisplay;
        Mat& R_f = *(Mat*)addrRF;
        Mat& t_f = *(Mat*)addrTF;

        int conv = 0;

        // frames arrive as RGBA from the Java side; convert for processing
        cvtColor(mCurr, mCurr, COLOR_RGBA2BGR);
        cvtColor(mPrev, mPrev, COLOR_RGBA2BGR);
        cvtColor(mDisplay, mDisplay, COLOR_RGBA2BGR);

        // intrinsics, stored as CV_32F (must be read back with at<float>)
        Mat cameraMatrix = (Mat_<float>(3, 3) << 1097.1547, 0.0, 403.9075,
                                                 0.0, 821.5675, 298.8437,
                                                 0.0, 0.0, 1.0);

        VO::featureOperations odometry(cameraMatrix, mCurr, mPrev, mDisplay, R_f, t_f, 1);
        odometry.calcOdometry(cameraMatrix, mCurr, mPrev, mDisplay, R_f, t_f, 1);

        // convert back so the Java side can display the frames
        cvtColor(mCurr, mCurr, COLOR_BGR2RGBA);
        cvtColor(mPrev, mPrev, COLOR_BGR2RGBA);
        cvtColor(mDisplay, mDisplay, COLOR_BGR2RGBA);

        return (jint)conv;
    }
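Since the printed values look like a possible type mismatch, a small sanity check at the JNI boundary may be worth adding. A minimal sketch; the expected types follow from how the Mats are created on the Java side:

    // Sketch: verify the Mats arriving over JNI have the expected layout
    CV_Assert(mCurr.type() == CV_8UC4 && mPrev.type() == CV_8UC4);      // RGBA frames
    CV_Assert(R_f.type() == CV_32F && R_f.rows == 3 && R_f.cols == 3);  // as created in Java
    CV_Assert(t_f.type() == CV_32F && t_f.rows == 3 && t_f.cols == 1);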

calcOdometry function:

    void VO::featureOperations::calcOdometry(cv::Mat cameraMatrix, cv::Mat currImage,
            cv::Mat prevImage, cv::Mat& trajectory, cv::Mat& R_f, cv::Mat& t_f,
            int enableHomography) {
        // Intrinsics; cameraMatrix is CV_32F, so read with at<float>.
        // Note: the focal/pp overload of findEssentialMat assumes a single
        // focal length, but fx (1097) and fy (821) above differ considerably.
        double focal = cameraMatrix.at<float>(0, 0);
        cv::Point2d pp(cameraMatrix.at<float>(0, 2), cameraMatrix.at<float>(1, 2));

        // recovering the pose and the essential matrix
        cv::Mat E, R, t, mask;

        std::vector<uchar> status;
        std::vector<cv::Point2f> prevFeatures;
        std::vector<cv::Point2f> currFeatures;

        prevFeatures = this->detectFeatures(prevImage);

        // For FAST
        if (this->trackFeatures(prevImage, currImage, prevFeatures, currFeatures, status)) {
            if (prevFeatures.size() > 200 && currFeatures.size() > 200) {
                // E and the outputs of recoverPose are CV_64F (double)
                E = cv::findEssentialMat(currFeatures, prevFeatures, focal, pp,
                                         cv::RANSAC, 0.999, 1.0, mask);
                cv::recoverPose(E, currFeatures, prevFeatures, R, t, focal, pp, mask);
            } else {
                R = cv::Mat::zeros(3, 3, CV_32F);
                t = cv::Mat::zeros(3, 1, CV_32F);
            }
        }

        // a redetection is triggered in case the number of features being
        // tracked falls below a particular threshold
        if (prevFeatures.size() < 1800) {
            prevFeatures = this->detectFeatures(prevImage);
            this->trackFeatures(prevImage, currImage, prevFeatures, currFeatures, status);
        }
    }
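The accumulation of R_f and t_f is not shown above; for context, a minimal sketch of the usual monocular update, assuming unit scale (absolute scale cannot be recovered from a single camera). Note that recoverPose outputs CV_64F matrices, so R_f and t_f would need to be CV_64F too; with the CV_32F Mats created on the Java side these expressions would throw:

    // Sketch: fold the per-frame motion (R, t) into the accumulated pose
    double scale = 1.0;                 // assumed; monocular VO has no absolute scale
    t_f = t_f + scale * (R_f * t);      // translate along the current heading
    R_f = R * R_f;                      // compose the rotations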

    std::vector<cv::Point2f> VO::featureOperations::detectFeatures(cv::Mat img) {
        // convert to grayscale without touching the caller's image
        cv::Mat gray;
        cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);

        // Detect features on this image
        std::vector<cv::Point2f> pointsFAST;
        std::vector<cv::KeyPoint> keypoints_FAST;

        // FAST detector
        int fast_threshold = 20;
        bool nonmaxSuppression = true;
        cv::FAST(gray, keypoints_FAST, fast_threshold, nonmaxSuppression);
        cv::KeyPoint::convert(keypoints_FAST, pointsFAST, std::vector<int>());
        assert(!pointsFAST.empty());
        return pointsFAST;
    }
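To sanity-check the detector output, the returned points can be drawn onto a copy of the frame. A minimal sketch; visualizeFeatures is just an illustrative helper, not part of the code above:

    // Sketch: overlay detected features on a copy of the frame for debugging
    cv::Mat visualizeFeatures(const cv::Mat& frame, const std::vector<cv::Point2f>& pts) {
        cv::Mat vis = frame.clone();
        for (const cv::Point2f& p : pts)
            cv::circle(vis, p, 3, cv::Scalar(0, 255, 0), 1);   // one circle per feature
        return vis;
    }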

    bool VO::featureOperations::trackFeatures(cv::Mat prevImg, cv::Mat currentImg,
            std::vector<cv::Point2f>& points1, std::vector<cv::Point2f>& points2,
            std::vector<uchar>& status) {

        cv::Mat prevImg_gray, currentImg_gray;
        cv::cvtColor(prevImg, prevImg_gray, cv::COLOR_BGR2GRAY);
        cv::cvtColor(currentImg, currentImg_gray, cv::COLOR_BGR2GRAY);

        std::vector<float> err;
        cv::Size winSize = cv::Size(21, 21);
        cv::TermCriteria termcrit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, 0.01);

        cv::calcOpticalFlowPyrLK(prevImg_gray, currentImg_gray, points1, points2,
                                 status, err, winSize, 3, termcrit, 0, 0.001);

        // getting rid of points for which the KLT tracking failed or which
        // have gone outside the frame
        int indexCorrection = 0;
        for (size_t i = 0; i < status.size(); i++) {
            cv::Point2f pt = points2.at(i - indexCorrection);
            bool outside = pt.x < 0 || pt.y < 0 ||
                           pt.x >= currentImg.cols || pt.y >= currentImg.rows;
            if (status.at(i) == 0 || outside) {
                if (outside)
                    status.at(i) = 0;
                points1.erase(points1.begin() + (i - indexCorrection));
                points2.erase(points2.begin() + (i - indexCorrection));
                indexCorrection++;
            }
        }

        if (points1.size() <= 5 || points2.size() <= 5) {
            // note: std::cout is not visible in logcat; these are leftover
            // desktop debug prints
            std::cout << "Previous Features : \n" << points1 << std::endl;
            std::cout << "Current Features : \n" << points2 << std::endl;
        }
        return true;
    }
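As an aside, the erase-in-a-loop pruning above is quadratic; an equivalent single-pass compaction (same result, just a write index instead of repeated erases) would look roughly like this:

    // Sketch: single-pass removal of failed or out-of-frame tracks
    size_t keep = 0;
    for (size_t i = 0; i < status.size(); ++i) {
        const cv::Point2f& pt = points2[i];
        bool inside = pt.x >= 0 && pt.y >= 0 &&
                      pt.x < currentImg.cols && pt.y < currentImg.rows;
        if (status[i] && inside) {
            points1[keep] = points1[i];   // keep the surviving pair
            points2[keep] = points2[i];
            ++keep;
        }
    }
    points1.resize(keep);
    points2.resize(keep);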

Any help regarding this would be appreciated.