I am triangulating points from a stereo system and then using those points in solvePnPRansac. When I reproject the points they look good, but the rvec and tvec values returned are wrong: they jump around a lot, flip between positive and negative, and are generally incorrect.
I have checked consecutive sets of triangulated points in a 3D application, and from one frame to the next they do not line up: each cloud is very similar in shape, but the whole cloud is offset, for example 10 units forward of the previous one. So I assume my projection matrices are incorrect.
I understand that solvePnP needs the 3D points in object space, and it looks like I am providing them in camera space.
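For reference, my understanding is that solvePnP returns the transform that maps object-space points into the camera frame, so getting the camera's pose in object space means inverting that transform. A minimal sketch of the inversion I am relying on (assuming rvec and tvec are the CV_64F outputs of solvePnP):

// solvePnP gives X_cam = R * X_obj + t.
// The camera pose in object space is the inverse: R' = R^T, t' = -R^T * t.
cv::Mat R;
cv::Rodrigues(rvec, R);        // rotation vector -> 3x3 matrix
cv::Mat R_inv = R.t();         // inverse rotation (R is orthonormal)
cv::Mat t_inv = -R_inv * tvec; // camera centre in object coordinates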
Every frame, I do the following:
// 1) Solve PnP against the previous frame's triangulated points,
//    and reproject them as a visual check.
std::vector<cv::Point2f> projectedPoints = CvPnpRansac(keyPntsGood, objectPointsTri, cameraMatrix, distCoeffs, rvec, tvec);
// 2) Invert the returned pose to get the camera pose.
cv::Mat RotMat, tvecFlip, rvecFlip;
cv::Rodrigues(rvec, RotMat);
RotMat = RotMat.t();
tvecFlip = -RotMat * tvec;
cv::Rodrigues(RotMat, rvecFlip);
// 3) Rebuild the stereo projection matrices from the new pose.
upDatePMatrix(rvecFlip, tvecFlip, P1, P2);
// 4) Triangulate this frame's matches; these become the next frame's object points.
objectPointsTri = triangulateCv(keyPntsGoodL, keyPntsGoodR, P1, P2); // std::vector<cv::Point3f>, declared outside the loop
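As a plumbing check on my conventions (not part of the pipeline above), projecting the freshly triangulated points through P1 should land back on the left keypoints; a quick sketch, ignoring lens distortion and assuming P1 is CV_64F:

double err = 0;
for (size_t i = 0; i < objectPointsTri.size(); i++)
{
    const cv::Point3f &X = objectPointsTri[i];
    cv::Mat Xh = (cv::Mat_<double>(4, 1) << X.x, X.y, X.z, 1.0);
    cv::Mat x = P1 * Xh; // homogeneous image point
    cv::Point2f proj(x.at<double>(0) / x.at<double>(2),
                     x.at<double>(1) / x.at<double>(2));
    cv::Point2f d = proj - keyPntsGoodL[i].pt;
    err += std::sqrt(double(d.x * d.x + d.y * d.y));
}
std::cout << "mean reprojection error (left): " << err / objectPointsTri.size() << std::endl;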
The functions above are:
cv::Mat computeProjMat(cv::Mat camMat, cv::Mat rotVec, cv::Mat transVec)
{
    cv::Mat rotMat(3, 3, CV_64F), rotTransMat(3, 4, CV_64F); //Init.
    //Convert rotation vector into rotation matrix
    cv::Rodrigues(rotVec, rotMat);
    //flip to camera pose
    rotMat = rotMat.t(); // rotation of inverse
    transVec = rotMat.t() * transVec; // translation of inverse
    //Append translation vector to rotation matrix
    cv::hconcat(rotMat, transVec, rotTransMat);
    //Compute projection matrix by multiplying the intrinsic parameter
    //matrix (A) with the 3 x 4 rotation and translation pose matrix (RT):
    //Projection Matrix = A * RT
    return (camMat * rotTransMat);
}
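For comparison, my understanding of the textbook form is P = K * [R | t], where (R, t) maps object/world coordinates into the camera frame, i.e. the same direction solvePnP reports, with no inversion. A sketch of that form (projFromWorldToCam is just an illustrative name):

cv::Mat projFromWorldToCam(const cv::Mat &K, const cv::Mat &rvec, const cv::Mat &tvec)
{
    cv::Mat R, Rt;
    cv::Rodrigues(rvec, R);   // world-to-camera rotation
    cv::hconcat(R, tvec, Rt); // [R | t], 3 x 4
    return K * Rt;            // P = K [R | t]
}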
void upDatePMatrix(cv::Mat rvec, cv::Mat tvec, cv::Mat &P1return, cv::Mat &P2return)
{
    //extrinsics
    float tx = 0.118f; // STEREO BASELINE, to offset the right cam from the left.
    float ty = 0;
    float tz = 0;
    cv::Mat tvec2 = cv::Mat(tvec.rows, tvec.cols, tvec.type());
    cv::Mat rvec2 = cv::Mat(rvec.rows, rvec.cols, rvec.type());
    //rotation between the cameras (the images are rectified, so this is identity; currently unused)
    double rots[9] = { 1,0,0,0,1,0,0,0,1 };
    cv::Mat Rr2 = cv::Mat(3, 3, CV_64F, rots);
    //offset the right camera by the baseline
    tvec2.at<double>(0) = tvec.at<double>(0) + tx;
    tvec2.at<double>(1) = tvec.at<double>(1);
    tvec2.at<double>(2) = tvec.at<double>(2);
    //invert the pose
    cv::Mat Rr;
    cv::Rodrigues(rvec, Rr);
    Rr = Rr.t();
    tvec = -Rr * tvec;
    tvec2 = -Rr * tvec2;
    cv::Rodrigues(Rr, rvec2);
    cv::Mat P1 = computeProjMat(cameraMatrix, rvec2, tvec);
    cv::Mat P2 = computeProjMat(cameraMatrix, rvec2, tvec2);
    P1.copyTo(P1return);
    P2.copyTo(P2return);
}
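For comparison (and I may have the sign convention backwards, so this is only a sketch): for a rectified pair with the world origin pinned to the left camera, the canonical matrices would be P1 = K[I | 0] and P2 = K[I | t], with t = (-baseline, 0, 0)^T, since a world point shifts in the negative x direction in the right camera's frame. Assuming cameraMatrix is CV_64F, and with P1fixed/P2fixed as illustrative names:

double b = 0.118; // baseline
cv::Mat I0 = (cv::Mat_<double>(3, 4) <<
    1, 0, 0, 0,
    0, 1, 0, 0,
    0, 0, 1, 0);
cv::Mat Ib = (cv::Mat_<double>(3, 4) <<
    1, 0, 0, -b,
    0, 1, 0, 0,
    0, 0, 1, 0);
cv::Mat P1fixed = cameraMatrix * I0; // left camera
cv::Mat P2fixed = cameraMatrix * Ib; // right camera, offset by the baseline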
std::vector<cv::Point2f> CvPnpRansac(std::vector<cv::KeyPoint> imagePoints, std::vector<cv::Point3f> objectPoints, cv::Mat cameraMatrix, cv::Mat distCoeffs, cv::Mat &rvecReturn, cv::Mat &tvecReturn)
{
    std::vector<cv::Point2f> points2d;
    std::vector<cv::Point3f> points3d;
    for (size_t i = 0; i < objectPoints.size(); i++)
    {
        cv::Point2f tmp;
        tmp.x = imagePoints[i].pt.x;
        tmp.y = imagePoints[i].pt.y;
        points2d.push_back(tmp);
        points3d.push_back(objectPoints[i]);
    }
    bool useExtrinsicGuess = false;
    int iterationsCount = 200;
    float reprojectionError = 1.0f;
    double confidence = 0.9;
    cv::Mat rvec, tvec; // outputs; no extrinsic guess is used
    bool success = cv::solvePnPRansac(points3d, points2d, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, cv::noArray(), cv::SOLVEPNP_ITERATIVE);
    std::vector<cv::Point2f> projectedPoints;
    cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);
    rvec.copyTo(rvecReturn);
    tvec.copyTo(tvecReturn);
    return projectedPoints;
}
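One diagnostic that might help here (a sketch, not in my code yet): inside CvPnpRansac, asking solvePnPRansac for its inlier set, since a low inlier count would explain the jumping rvec and tvec values:

cv::Mat inliers;
bool ok = cv::solvePnPRansac(points3d, points2d, cameraMatrix, distCoeffs,
                             rvec, tvec, useExtrinsicGuess, iterationsCount,
                             reprojectionError, confidence, inliers,
                             cv::SOLVEPNP_ITERATIVE);
if (ok)
    std::cout << "inliers: " << inliers.rows << " / " << points3d.size() << std::endl;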
std::vector<cv::Point3f> triangulateCv(std::vector<cv::KeyPoint> keyPntsLeft, std::vector<cv::KeyPoint> keyPntsRight, cv::Mat P1, cv::Mat P2)
{
    std::vector<cv::Point3f> p3d;
    std::vector<cv::Point2f> keysLeft;
    std::vector<cv::Point2f> keysRight;
    for (size_t i = 0; i < keyPntsLeft.size(); i++)
    {
        keysLeft.push_back(keyPntsLeft[i].pt);
        keysRight.push_back(keyPntsRight[i].pt);
    }
    cv::Mat pointmatrix(keysLeft);
    cv::Mat pointmatrixR(keysRight);
    cv::Mat points3DHomogeneous;
    cv::triangulatePoints(P1, P2, pointmatrix, pointmatrixR, points3DHomogeneous);
    //convert each column from homogeneous to Euclidean coordinates
    for (int i = 0; i < points3DHomogeneous.cols; ++i) {
        cv::Point3d point = toInHomo(cv::Vec4d(points3DHomogeneous.col(i))); // my helper that divides by w
        p3d.push_back(point);
    }
    return p3d;
}
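Finally, a sanity check I could run on the triangulated cloud (a sketch; countInFront is just an illustrative name, and it assumes P was built exactly as K * [R | t] in CV_64F): a correctly triangulated point should have positive depth in front of both cameras, so a low in-front count would flag a convention error:

int countInFront(const std::vector<cv::Point3f> &pts, const cv::Mat &P, const cv::Mat &K)
{
    cv::Mat Rt = K.inv() * P; // recover [R | t] from P = K [R | t]
    int inFront = 0;
    for (const cv::Point3f &X : pts)
    {
        // third row of [R | t] applied to (X, 1) gives the depth in the camera frame
        double z = Rt.at<double>(2, 0) * X.x + Rt.at<double>(2, 1) * X.y
                 + Rt.at<double>(2, 2) * X.z + Rt.at<double>(2, 3);
        if (z > 0) inFront++;
    }
    return inFront;
}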
Where am I going wrong here? How can I triangulate each frame's points into the same world space? I have attached some demo data: the rvec and tvec values I get, the left and right points I am triangulating, and the resulting 3D points, for three frames spaced 20 frames apart.
https://ufile.io/7emhj
Thank you!