Problem using OpenCV to calibrate a camera

Hi,

I am trying to write a program to correct camera distortion. I studied the OpenCV sample code and a few online tutorials, but I am having trouble with the calibrateCamera call. The OpenCV version I am using is 2.4.5.

Here is the code that I have:

    #include "stdafx.h"
    #include <cv.h>
    #include <highgui.h>
    using namespace cv;
    using namespace std;


    int _tmain(int argc, _TCHAR* argv[])
    {
        int numBoards = 0;
        int numCornersHor;
        int numCornersVer;

        printf("Enter number of corners along width: ");
        scanf("%d", &numCornersHor);
        printf("Enter number of corners along height: ");
        scanf("%d", &numCornersVer);
        printf("Enter number of boards: ");
        scanf("%d", &numBoards);

        int numSquares = numCornersHor * numCornersVer;
        Size board_sz = Size(numCornersHor, numCornersVer);

        /* chessboard images that will be used to calibrate the camera */
        vector<string> imageList(10);
        imageList[0] = "chessboard1.jpg";
        imageList[1] = "chessboard2.jpg";
        imageList[2] = "chessboard3.jpg";
        imageList[3] = "chessboard4.jpg";
        imageList[4] = "chessboard5.jpg";
        imageList[5] = "chessboard6.jpg";
        imageList[6] = "chessboard7.jpg";
        imageList[7] = "chessboard8.jpg";
        imageList[8] = "chessboard9.jpg";
        imageList[9] = "chessboard10.jpg";

        /* physical positions of the corners (in 3D space), one list per view */
        vector<vector<Point3f>> object_points(1);
        /* locations of the corners in the images (in 2D space) */
        vector<vector<Point2f>> image_points;
        /* corners found in the current chessboard image */
        vector<Point2f> corners;
        /* number of boards successfully processed so far */
        int successes = 0;

        Mat image;
        Mat gray_image;

        /* fill in the board's corner positions for one view; the same
           pattern is reused for every view further down */
        for (int index = 0; index < numSquares; index++)
        {
            object_points[0].push_back(Point3f(float(index / numCornersHor), float(index % numCornersHor), 0.0f));
        }

        while (successes < numBoards)
        {
            image = imread(imageList[successes], CV_LOAD_IMAGE_COLOR);
            /* convert the image into a grayscale image */
            cvtColor(image, gray_image, CV_BGR2GRAY);

            bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);

            if (found)
            {
                cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
                drawChessboardCorners(gray_image, board_sz, corners, found);
                image_points.push_back(corners);
                successes++;
            }
        }

        /* replicate the single set of object points so there is one per view */
        object_points.resize(image_points.size(), object_points[0]);

        Mat intrinsic = Mat(3, 3, CV_32FC1);
        Mat distCoeffs;
        vector<Mat> rvecs;
        vector<Mat> tvecs;

        intrinsic.ptr<float>(0)[0] = 1;
        intrinsic.ptr<float>(1)[1] = 1;
        calibrateCamera(object_points, image_points, image.size(), intrinsic,
            distCoeffs, rvecs, tvecs);

        return 0;
    }

I keep getting an assertion error from OpenCV at this point inside the collectCalibrationData function:

    for( i = 0; i < nimages; i++ )
    {
        ni = objectPoints.getMat(i).checkVector(3, CV_32F);
        CV_Assert( ni >= 0 );
        total += ni;
    }

The program fails the assertion every single time.

I don't know what is wrong with my object_points declaration. Could anyone explain why this assertion fails and how to fix it? I have been spending a long time on this and not getting anywhere. Thank you very much.
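
In case it helps narrow things down, here is a small sanity check I was planning to add right before the calibrateCamera call (my own rough sketch, not from the sample). It just prints the shapes that collectCalibrationData should see, using the same checkVector(3, CV_32F) test as the assertion:

    /* Rough diagnostic sketch (mine, not from the OpenCV sample):
       print the shapes that collectCalibrationData will see. */
    printf("views: object_points=%d, image_points=%d\n",
           (int)object_points.size(), (int)image_points.size());
    for (size_t i = 0; i < object_points.size(); i++)
    {
        Mat obj(object_points[i]);            /* wraps the vector<Point3f> as a Mat */
        int ni = obj.checkVector(3, CV_32F);  /* the same test the assertion performs */
        int nimg = i < image_points.size() ? (int)image_points[i].size() : -1;
        printf("view %d: %d object points (checkVector=%d), %d image points\n",
               (int)i, (int)object_points[i].size(), ni, nimg);
    }

If any view prints checkVector = -1, or if the object and image point counts differ, that would at least tell me where the input to calibrateCamera goes wrong.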