undistortPoints not giving the exact inverse of the distortion model.

Hello,

I was doing some tests with the distortion model of OpenCV. Basically, I implemented the distortion equations and checked whether cv::undistortPoints gives me their exact inverse. It turns out it does not. Looking at the implementation of cv::undistortPoints, I saw that the termination condition of the iterative process that inverts the distortion model is just a fixed iteration count: OpenCV always does 5 iterations (and 0 iterations if no distortion coefficients are provided), without any error metric on the undistorted point to check whether it has actually converged. With this in mind, I copied the function and modified the termination condition so that it takes an error metric into account, and this gives me the exact inverse of the distortion model. The code showing this is attached at the end of this post. My question is:

Does this happen because OpenCV prefers performance (spending a bit less time per point) over accuracy (spending a bit more), or is this just a "bug"? (Obviously, with the termination condition I propose, the function takes more time to undistort each point.)

Thank you very much!
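
For context, the only change with respect to OpenCV's implementation is the termination of the iterative compensation loop. As far as I can tell, the original code (OpenCV 2.4.x) simply runs a fixed number of iterations without checking convergence, roughly:

// Original termination (sketch): fixed iteration count, no error metric
for( j = 0; j < iters; j++ )   // iters == 5 when distortion coefficients are given
{
    double r2 = x*x + y*y;
    double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
    double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
    double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
    x = (x0 - deltaX)*icdist;
    y = (y0 - deltaY)*icdist;
}

The copy below keeps the same update but additionally stops as soon as the undistorted estimate stops changing between iterations.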

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp> // cv::undistortPoints is declared here in OpenCV 3.x
#include <cmath>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <vector>

using namespace cv;

// This is a copy of the opencv implementation
void cvUndistortPoints_copy( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix,
                   const CvMat* _distCoeffs,
                   const CvMat* matR, const CvMat* matP )
{
    double A[3][3], RR[3][3], k[8]={0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy;
    CvMat matA=cvMat(3, 3, CV_64F, A), _Dk;
    CvMat _RR=cvMat(3, 3, CV_64F, RR);
    const CvPoint2D32f* srcf;
    const CvPoint2D64f* srcd;
    CvPoint2D32f* dstf;
    CvPoint2D64f* dstd;
    int stype, dtype;
    int sstep, dstep;
    int i, j, n, iters = 1;

    CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) &&
        (_src->rows == 1 || _src->cols == 1) &&
        (_dst->rows == 1 || _dst->cols == 1) &&
        _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 &&
        (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) &&
        (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2));

    CV_Assert( CV_IS_MAT(_cameraMatrix) &&
        _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 );

    cvConvert( _cameraMatrix, &matA );

    if( _distCoeffs )
    {
        CV_Assert( CV_IS_MAT(_distCoeffs) &&
            (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) &&
            (_distCoeffs->rows*_distCoeffs->cols == 4 ||
             _distCoeffs->rows*_distCoeffs->cols == 5 ||
             _distCoeffs->rows*_distCoeffs->cols == 8));

        _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k);

        cvConvert( _distCoeffs, &_Dk );
        iters = 5; // kept from the original code; not used by the modified loop below
    }

    if( matR )
    {
        CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 );
        cvConvert( matR, &_RR );
    }
    else
        cvSetIdentity(&_RR);

    if( matP )
    {
        double PP[3][3];
        CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP);
        CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4));
        cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP );
        cvMatMul( &_PP, &_RR, &_RR );
    }

    srcf = (const CvPoint2D32f*)_src->data.ptr;
    srcd = (const CvPoint2D64f*)_src->data.ptr;
    dstf = (CvPoint2D32f*)_dst->data.ptr;
    dstd = (CvPoint2D64f*)_dst->data.ptr;
    stype = CV_MAT_TYPE(_src->type);
    dtype = CV_MAT_TYPE(_dst->type);
    sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype);
    dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype);

    n = _src->rows + _src->cols - 1;

    fx = A[0][0];
    fy = A[1][1];
    ifx = 1./fx;
    ify = 1./fy;
    cx = A[0][2];
    cy = A[1][2];

    for( i = 0; i < n; i++ )
    {
        double x, y, x0, y0;
        if( stype == CV_32FC2 )
        {
            x = srcf[i*sstep].x;
            y = srcf[i*sstep].y;
        }
        else
        {
            x = srcd[i*sstep].x;
            y = srcd[i*sstep].y;
        }

        x0 = x = (x - cx)*ifx;
        y0 = y = (y - cy)*ify;

        // compensate distortion iteratively; unlike the original code, the loop
        // below also stops once the estimate stops changing (e == 0), not only
        // after a fixed number of iterations
        int max_iters(500);
        double e(1);
        for( j = 0; j < max_iters && e>0; j++ )
        {
            double r2 = x*x + y*y;
            double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
            double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
            double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
            double xant = x;
            double yant = y;
            x = (x0 - deltaX)*icdist;
            y = (y0 - deltaY)*icdist;
            e = pow(xant - x,2)+ pow(yant - y,2);
        }

        double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2];
        double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2];
        double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]);
        x = xx*ww;
        y = yy*ww;

        if( dtype == CV_32FC2 )
        {
            dstf[i*dstep].x = (float)x;
            dstf[i*dstep].y = (float)y;
        }
        else
        {
            dstd[i*dstep].x = x;
            dstd[i*dstep].y = y;
        }
    }
}
void undistortPoints_copy( InputArray _src, OutputArray _dst,
                          InputArray _cameraMatrix,
                          InputArray _distCoeffs,
                          InputArray _Rmat=noArray(),
                          InputArray _Pmat=noArray() )
{
    Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
    Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();

    CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
              ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));

    _dst.create(src.size(), src.type(), -1, true);
    Mat dst = _dst.getMat();

    CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix;
    CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0;
    if( R.data )
        pR = &(matR = R);
    if( P.data )
        pP = &(matP = P);
    if( distCoeffs.data )
        pD = &(_cdistCoeffs = distCoeffs);
    cvUndistortPoints_copy(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP);
}

// Distortion model implementation
cv::Point2d distortPoint(cv::Point2d undistorted_point,cv::Mat camera_matrix, std::vector<double> distort_coefficients){

    // Check that camera matrix is double
    if (!(camera_matrix.type() == CV_64F || camera_matrix.type() == CV_64FC1)){
        std::ostringstream oss;
        oss<<"distortPoint(): Camera matrix type is wrong. It has to be a double matrix (CV_64F)";
        throw std::runtime_error(oss.str());
    }

    // Create distorted point
    cv::Point2d distortedPoint;
    distortedPoint.x = (undistorted_point.x - camera_matrix.at<double>(0,2))/camera_matrix.at<double>(0,0);
    distortedPoint.y = (undistorted_point.y - camera_matrix.at<double>(1,2))/camera_matrix.at<double>(1,1);

    // Get model
    if (distort_coefficients.size() < 4 || distort_coefficients.size() > 8 ){
        throw std::runtime_error("distortPoint(): Invalid number of distortion coefficients.");
    }
    double k1(distort_coefficients[0]);
    double k2(distort_coefficients[1]);
    double p1(distort_coefficients[2]);// first tangential distortion coefficient
    double p2(distort_coefficients[3]);// second tangential distortion coefficient
    double k3(0);
    double k4(0);
    double k5(0);
    double k6(0);
    if (distort_coefficients.size() > 4)
        k3 = distort_coefficients[4];
    if (distort_coefficients.size() > 5)
        k4 = distort_coefficients[5];
    if (distort_coefficients.size() > 6)
        k5 = distort_coefficients[6];
    if (distort_coefficients.size() > 7)
        k6 = distort_coefficients[7];

    // Distort
    double xcx = distortedPoint.x;
    double ycy = distortedPoint.y;
    double r2 = pow(xcx, 2) + pow(ycy, 2);
    double r4 = pow(r2,2);
    double r6 = pow(r2,3);
    // Rational distortion model: (1 + k1 r^2 + k2 r^4 + k3 r^6) / (1 + k4 r^2 + k5 r^4 + k6 r^6)
    double k = (1+k1*r2+k2*r4+k3*r6)/(1+k4*r2+k5*r4+k6*r6);
    distortedPoint.x = xcx*k + 2*p1*xcx*ycy + p2*(r2+2*pow(xcx,2));
    distortedPoint.y = ycy*k + p1*(r2+2*pow(ycy,2)) + 2*p2*xcx*ycy;
    distortedPoint.x = distortedPoint.x*camera_matrix.at<double>(0,0)+camera_matrix.at<double>(0,2);
    distortedPoint.y = distortedPoint.y*camera_matrix.at<double>(1,1)+camera_matrix.at<double>(1,2);

    // Exit
    return distortedPoint;
}

int main(int argc, char** argv){

    // Camera matrix
    double cam_mat_da[] = {1486.58092,0,1046.72507,0,1489.8659,545.374244,0,0,1};
    cv::Mat cam_mat(3,3,CV_64FC1,cam_mat_da);

    // Distortion coefficients
    double dist_coefs_da[] ={-0.13827409,0.29240721,-0.00088197,-0.00090189,0};
    std::vector<double> dist_coefs(dist_coefs_da,dist_coefs_da+5);

    // Distorted Point
    cv::Point2d p0(0,0);
    std::vector<cv::Point2d> p0_v;
    p0_v.push_back(p0);

    // Undistort Point
    std::vector<cv::Point2d> ud_p_v;
    cv::undistortPoints(p0_v,ud_p_v,cam_mat,dist_coefs);
    cv::Point2d ud_p = ud_p_v[0];
    ud_p.x = ud_p.x*cam_mat.at<double>(0,0)+cam_mat.at<double>(0,2);
    ud_p.y = ud_p.y*cam_mat.at<double>(1,1)+cam_mat.at<double>(1,2);

    // Redistort Point
    cv::Point2d p = distortPoint(ud_p, cam_mat,dist_coefs);

    // Undistort Point using own termination of iterative process
    std::vector<cv::Point2d> ud_p_v_local;
    undistortPoints_copy(p0_v,ud_p_v_local,cam_mat,dist_coefs);
    cv::Point2d ud_p_local = ud_p_v_local[0];
    ud_p_local.x = ud_p_local.x*cam_mat.at<double>(0,0)+cam_mat.at<double>(0,2);
    ud_p_local.y = ud_p_local.y*cam_mat.at<double>(1,1)+cam_mat.at<double>(1,2);

    // Redistort Point
    cv::Point2d p_local = distortPoint(ud_p_local, cam_mat,dist_coefs);

    // Display results
    std::cout<<"Distorted original point: "<<p0<<std::endl;
    std::cout<<"Undistorted point (CV): "<<ud_p<<std::endl;
    std::cout<<"Distorted point (CV): "<<p<<std::endl;
    std::cout<<"Erorr in the distorted point (CV): "<<sqrt(pow(p.x-p0.x,2)+pow(p.y-p0.y,2))<<std::endl;
    std::cout<<"Undistorted point (Local): "<<ud_p_local<<std::endl;
    std::cout<<"Distorted point (Local): "<<p_local<<std::endl;
    std::cout<<"Erorr in the distorted point (Local): "<<sqrt(pow(p_local.x-p0.x,2)+pow(p_local.y-p0.y,2))<<std::endl;

    // Exit
    return 0;
}
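
As a side note, the re-distortion step can also be cross-checked against cv::projectPoints by treating the normalized (undistorted) coordinates as a 3D point at z = 1 and projecting it with zero rotation and translation. A minimal sketch, where x_norm and y_norm are hypothetical names for the normalized undistorted coordinates (they are not variables of the program above) and which needs opencv2/calib3d.hpp:

// Re-distort a normalized undistorted point via cv::projectPoints
std::vector<cv::Point3d> object_point(1, cv::Point3d(x_norm, y_norm, 1.0)); // normalized coords, z = 1
std::vector<cv::Point2d> image_point;
cv::projectPoints(object_point, cv::Vec3d(0,0,0), cv::Vec3d(0,0,0),
                  cam_mat, dist_coefs, image_point);
// image_point[0] is the distorted pixel position; it should agree with
// distortPoint() applied to the corresponding undistorted pixel coordinates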