solvePnP determines the translation well, but not the rotation
I am trying to extract the pose of a camera from five point correspondences between the image and the world. I know the intrinsics and the distortion coefficients, and I use the solvePnP method to determine the camera pose. My problem is that the resulting translation vector is right, but the rotation does not seem to be. Since solvePnP returns the world-to-camera transform, I invert the joint [R|t] transform built from rvec and tvec to get the camera pose in the world frame. If you suspect what could be happening, or you need more details, please ask. Thank you in advance. My code:
vector<Point2f> contoursCenter; // image points (still in the bird's-eye view)
RNG rng(12345);
for (size_t i = 0; i < contours.size(); i++)
{
    // centroid of each contour from its image moments
    Moments m = moments(contours[i], false);
    Point2f center = Point2f(m.m10 / m.m00, m.m01 / m.m00);
    contoursCenter.push_back(center);
    Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
    drawContours(drawing, contours, (int)i, color, 2, 8);
    circle(drawing, center, 4, color, -1, 8, 0);
}
vector<Point2f> origCenters;
// transform the centers from the bird's-eye view back to the original image
perspectiveTransform(contoursCenter, origCenters, homography.inv());
for (const auto& center : origCenters)
{
    Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
    circle(image, center, 4, color, -1, 8, 0);
}
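// (Sanity check added for debugging: solvePnP assumes origCenters[i]
// corresponds to worldCoordinates[i], and a wrong ordering typically gives
// a plausible translation with a bad rotation. Drawing each point's index
// makes the correspondence easy to verify by eye.)
for (size_t i = 0; i < origCenters.size(); i++)
    putText(image, std::to_string(i), Point(origCenters[i]) + Point(6, -6),
            FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 0, 255), 2);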
namedWindow("drawing", WINDOW_NORMAL);
resizeWindow("drawing", 1366, 768);
imshow("drawing", image);
waitKey();
// solvePnP
vector<Point3f> worldCoordinates;
// world points as seen from the camera; the order must match origCenters
worldCoordinates.push_back(Point3f(0.995, -0.984, 0.005 + 0.945));  // lower-left circle
worldCoordinates.push_back(Point3f(-0.954, -0.984, 0.005 + 0.945)); // upper-left circle
worldCoordinates.push_back(Point3f(0, 0, 1.4));                     // ball
worldCoordinates.push_back(Point3f(-0.954, 0.987, 0.005 + 0.945));  // upper-right circle
worldCoordinates.push_back(Point3f(0.995, 0.987, 0.005 + 0.945));   // lower-right circle
cv::Mat intrinsic(cameraInfo_.intrinsicMatrix());
cv::Mat distortion(cameraInfo_.distortionCoeffs());
// approximate camera pose in the world frame, intended as the initial guess
float x = 7;
float y = 0;
float z = 4.5;
tf::Vector3 pos(x, y, z);
double roll = -TFSIMD_HALF_PI - 0.39;
double pitch = 0;
double yaw = TFSIMD_HALF_PI;
tf::Quaternion quat;
quat.setRPY(roll, pitch, yaw);
tf::Transform transform;
transform.setOrigin(pos);
transform.setRotation(quat);
// solvePnP expects the world->camera transform, so invert the camera-in-world pose
tf::Transform invTrans = transform.inverse();
tf::Matrix3x3 rotMat(invTrans.getRotation());
rotMat.getRPY(roll, pitch, yaw);
x = invTrans.getOrigin().x();
y = invTrans.getOrigin().y();
z = invTrans.getOrigin().z();
cv::Mat rvec = getRotationVectorFromTaitBryanAngles(roll, pitch, yaw);
// use CV_64F here so the at<double>() reads below stay valid
cv::Mat tvec = (cv::Mat_<double>(3, 1) << x, y, z);
bool useInitialGuess = false;
// note: the extrinsic guess is only honored by the iterative solver; with EPnP
// (CV_EPNP, SOLVEPNP_EPNP in OpenCV 3+) rvec and tvec are pure outputs
solvePnP(worldCoordinates, origCenters, intrinsic, distortion, rvec, tvec, useInitialGuess, CV_EPNP);
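// (Cross-check, not part of the original pipeline: solvePnP returns the
// world->camera transform, so the camera pose in the world frame is R^T and
// -R^T * t. Comparing this against the tf-based inversion below helps to
// isolate whether the bad rotation comes from solvePnP itself or from the
// RPY conversions.)
cv::Mat R;
cv::Rodrigues(rvec, R);        // axis-angle -> 3x3 rotation (world -> camera)
cv::Mat R_cam = R.t();         // camera orientation in the world frame
cv::Mat t_cam = -R_cam * tvec; // camera position in the world frame
std::cout << "camera position (world) = " << t_cam << std::endl;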
// L2 reprojection error using projectPoints
double errPnP = calculateError(rvec, tvec, intrinsic, distortion, origCenters, worldCoordinates);
std::cout << "PnP mean error = " << errPnP << std::endl; // prints error = 1.2
// reuse `transform` to hold the world->camera transform returned by solvePnP
transform.setOrigin(tf::Vector3(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2)));
tf::Quaternion q;
// get roll, pitch and yaw from the rotation vector (rvec is axis-angle,
// so its components cannot be used directly as Euler angles)
correctRVec(rvec, roll, pitch, yaw);
q.setRPY(roll, pitch, yaw);
//q.setRPY(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
transform.setRotation(q);
// invert back to get the camera pose in the world frame
tf::Transform invTransform = transform.inverse();
double xInv = invTransform.getOrigin().x();
double yInv = invTransform.getOrigin().y();
double zInv = invTransform.getOrigin().z();
tf::Matrix3x3 invRotMat(invTransform.getRotation());
double rollInv ...
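
For completeness, the two helpers used above look roughly like this (minimal sketches; the real implementations may differ in detail):

// Sketch: convert roll/pitch/yaw to an OpenCV rotation vector, using the
// same fixed-axis RPY convention as tf so the two stay consistent.
cv::Mat getRotationVectorFromTaitBryanAngles(double roll, double pitch, double yaw)
{
    tf::Matrix3x3 m;
    m.setRPY(roll, pitch, yaw);
    cv::Mat R = (cv::Mat_<double>(3, 3) <<
        m.getRow(0).x(), m.getRow(0).y(), m.getRow(0).z(),
        m.getRow(1).x(), m.getRow(1).y(), m.getRow(1).z(),
        m.getRow(2).x(), m.getRow(2).y(), m.getRow(2).z());
    cv::Mat rvec;
    cv::Rodrigues(R, rvec); // rotation matrix -> axis-angle vector
    return rvec;
}

// Sketch: mean L2 reprojection error via cv::projectPoints.
double calculateError(const cv::Mat& rvec, const cv::Mat& tvec,
                      const cv::Mat& intrinsic, const cv::Mat& distortion,
                      const std::vector<cv::Point2f>& imagePoints,
                      const std::vector<cv::Point3f>& worldPoints)
{
    std::vector<cv::Point2f> projected;
    cv::projectPoints(worldPoints, rvec, tvec, intrinsic, distortion, projected);
    double err = 0;
    for (size_t i = 0; i < imagePoints.size(); i++)
    {
        cv::Point2f d = imagePoints[i] - projected[i];
        err += std::sqrt((double)d.x * d.x + (double)d.y * d.y); // L2 distance
    }
    return err / imagePoints.size(); // mean error over all points
}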