solvePnP determines the translation well, but not the rotation

I am trying to extract the pose of a camera from five point correspondences between the image and the world. I know the intrinsics and the distortion coefficients, and I am using the solvePnP method to determine the camera pose. My problem is that the translation vector is right, but the rotation does not seem right. I invert the joint transform that contains tvec and rvec to get the camera pose. If you suspect what could be happening and need more details, please ask. Thank you in advance. My code:

vector<Point2f> contoursCenter; //image points
RNG rng(12345);

for (size_t i = 0; i < contours.size(); i++)
{
    Moments m = moments(contours[i], false);
    Point2f center = Point2f(m.m10 / m.m00, m.m01 / m.m00); // contour centroid from spatial moments
    contoursCenter.push_back(center);
    Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
    drawContours(drawing, contours, i, color, 2, 8);
    circle(drawing, center, 4, color, -1, 8, 0);
}


vector<Point2f> origCenters;
//map the centers from the bird's-eye view back to the original image
perspectiveTransform(contoursCenter, origCenters, homography.inv());
for (auto center : origCenters)
{
    Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
    circle(image, center, 4, color, -1, 8, 0);
}

namedWindow("drawing", WINDOW_NORMAL);
resizeWindow("drawing", 1366, 768);
imshow("drawing", image);
waitKey();

//solvePnP
vector<Point3f> worldCoordinates;
// world coordinates of the five points, labeled as seen from the camera;
// the order must match the corresponding image points in origCenters
worldCoordinates.push_back(Point3f(0.995, -0.984, 0.005 + 0.945)); //left down circle
worldCoordinates.push_back(Point3f(-0.954, -0.984, 0.005 + 0.945)); //left upper circle
worldCoordinates.push_back(Point3f(0, 0, 1.4)); //ball
worldCoordinates.push_back(Point3f(-0.954, 0.987, 0.005 + 0.945)); // right upper circle
worldCoordinates.push_back(Point3f(0.995, 0.987, 0.005 + 0.945)); // right down circle


cv::Mat intrinsic(cameraInfo_.intrinsicMatrix());
cv::Mat distortion(cameraInfo_.distortionCoeffs());

// initial guess: camera pose in the world frame
float x = 7;
float y = 0;
float z = 4.5;
tf::Vector3 pos(x, y, z);

double roll = -TFSIMD_HALF_PI - 0.39;
double pitch = 0;
double yaw = TFSIMD_HALF_PI;

tf::Quaternion quat;
quat.setRPY(roll, pitch, yaw);

tf::Transform transform;
transform.setOrigin(pos);
transform.setRotation(quat);

// invert the camera-in-world pose to get the world-to-camera transform that solvePnP works with
tf::Transform invTrans = transform.inverse();

tf::Matrix3x3 rotMat(invTrans.getRotation());
rotMat.getRPY(roll, pitch, yaw);
x = invTrans.getOrigin().x();
y = invTrans.getOrigin().y();
z = invTrans.getOrigin().z();

cv::Mat rvec = getRotationVectorFromTaitBryanAngles(roll, pitch, yaw);
cv::Mat tvec = (cv::Mat_<double>(3, 1) << x, y, z); // CV_64F, so the .at<double> reads below are valid

bool useInitialGuess = false;
// note: useExtrinsicGuess only applies to the iterative solver, not to EPnP
solvePnP(worldCoordinates, origCenters, intrinsic, distortion, rvec, tvec, useInitialGuess, CV_EPNP);

//L2 Error using projectPoints
double errPnP = calculateError(rvec, tvec, intrinsic, distortion, origCenters, worldCoordinates);

std::cout << "PnP mean error = " << errPnP << std::endl; //error=1.2

// reuse the tf::Transform declared above, now holding the solvePnP result (world -> camera)
transform.setOrigin(tf::Vector3(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2)));
tf::Quaternion q;
// get roll, pitch and yaw from the rotation vector
correctRVec(rvec, roll, pitch, yaw);
q.setRPY(roll, pitch, yaw);
//q.setRPY(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
transform.setRotation(q);

//invert back from world-to-camera to the camera pose in the world frame
tf::Transform invTransform = transform.inverse();

double xInv = invTransform.getOrigin().x();
double yInv = invTransform.getOrigin().y();
double zInv = invTransform.getOrigin().z();

tf::Matrix3x3 invRotMat(invTransform.getRotation());
double rollInv, pitchInv, yawInv;
invRotMat.getRPY(rollInv, pitchInv, yawInv);

std::cout << "trans = [" << xInv << " " << yInv << " " << zInv << "]" << std::endl;
std::cout << "rot = [" << rollInv << " " << pitchInv << " " << yawInv << "]" << std::endl;
std::cout << "rot (deg) [" << rollInv * 180. / M_PI << " " << pitchInv * 180. / M_PI << " " << yawInv * 180. / M_PI
        << "]" << std::endl;

The world frame has an x-axis that points forward, a y-axis that points to the left, and a z-axis that points up. In this frame the real pose of the camera is 7 meters in x, 4.5 meters in z, with a pitch of 0.39 radians and a yaw of PI radians. My results are the following:

trans = [7.0834 -0.0451012 4.65986]

rot = [-1.97556 -0.00664226 1.56302]

rot (deg) [-113.191 -0.380573 89.5545]
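
For reference, the same world-to-camera inversion can also be done directly in OpenCV, without going through tf. A minimal sketch, assuming rvec and tvec are the 3x1 CV_64F vectors returned by solvePnP:

cv::Mat R;
cv::Rodrigues(rvec, R);         // world -> camera rotation
cv::Mat Rinv = R.t();           // orthonormal, so the transpose is the inverse
cv::Mat camPos = -Rinv * tvec;  // camera position in world coordinates
cv::Mat rvecWorld;
cv::Rodrigues(Rinv, rvecWorld); // camera orientation in the world frame, as a rotation vector
std::cout << "camera position = " << camPos.t() << std::endl;

Keep in mind that OpenCV's camera frame has x pointing right, y pointing down and z pointing forward along the optical axis, so the same physical rotation decomposes into different roll/pitch/yaw values than in the world frame described above.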
