Triangulating points into world-space, and using them in solvePnP.

I am triangulating points from a stereo system, then using those points in solvePnPRansac. When I reproject the points they look good, but the tvec and rvec values returned are incorrect: they jump around a lot and flip between positive and negative values.

Having checked consecutive sets of triangulated points in a 3d application, the frame-to-frame point clouds do not line up: the motion I expect to see on the camera appears on the point clouds instead (each cloud is very similar to the last, but the whole cloud sits, for example, 10 units forward of the previous one). I have functions that update the projection matrices as I go, which should put the points in the same world position, but they don't, so I am doing something wrong. Any advice is much appreciated.
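My understanding is that solvePnP wants the 3d points in object (world) space, and I suspect I am handing it points in camera space. For reference, the convention I am assuming (a sketch, and possibly where I am wrong) is that solvePnP returns the world-to-camera transform, so a triangulated camera-space point should map back to world space through the inverse:

    // Sketch of the convention I am assuming:
    // solvePnP returns rvec/tvec such that X_cam = R * X_world + tvec,
    // so the inverse mapping is X_world = R^T * (X_cam - tvec).
    cv::Mat R;
    cv::Rodrigues(rvec, R);                                   // 3x1 rvec -> 3x3 R
    cv::Mat Xcam = (cv::Mat_<double>(3, 1) << 0.0, 0.0, 1.0); // example camera-space point
    cv::Mat Xworld = R.t() * (Xcam - tvec);                   // camera space -> world space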

Every frame, I do the following:

projectedPoints = CvPnpRansac(keyPntsGood, objectPointsTri, cameraMatrix, distCoeffs, rvec, tvec);



upDatePMatrix(rvec, tvec, P1, P2);
std::vector<cv::Point3f> objectPointsTri = triangulateCv(keyPntsGoodL, keyPntsGoodR, P1, P2);
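For context, cameraMatrix, distCoeffs, rvec, tvec, P1 and P2 live outside this loop; roughly (the shapes are my assumption of the usual OpenCV defaults):

    cv::Mat cameraMatrix;  // 3x3 CV_64F intrinsics from calibration
    cv::Mat distCoeffs;    // 1xN CV_64F distortion coefficients from calibration
    cv::Mat rvec, tvec;    // 3x1 CV_64F pose, updated by CvPnpRansac each frame
    cv::Mat P1, P2;        // 3x4 CV_64F projection matrices for the left/right cameras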

The functions above are:

cv::Mat computeProjMat(cv::Mat camMat, cv::Mat rotVec, cv::Mat transVec)
{
    cv::Mat rotMat(3, 3, CV_64F), rotTransMat(3, 4, CV_64F);
    //Convert rotation vector into rotation matrix
    cv::Rodrigues(rotVec, rotMat);
    //flip to camera pose (invert the transform)
    rotMat = rotMat.t();            // rotation of inverse: R^T
    transVec = -rotMat * transVec;  // translation of inverse: -R^T * t

    //Append translation vector to rotation matrix
    cv::hconcat(rotMat, transVec, rotTransMat);
    //Compute projection matrix by multiplying the intrinsic matrix (A)
    //with the 3 x 4 rotation and translation pose matrix (RT):
    //Projection Matrix = A * RT
    return (camMat * rotTransMat);
}
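To sanity-check what computeProjMat returns, I would expect a world point pushed through P to land on the same pixel as cv::projectPoints with the same rvec/tvec (a test sketch; the point is an arbitrary example and distortion is ignored):

    cv::Mat P = computeProjMat(cameraMatrix, rvec, tvec);
    cv::Mat Xw = (cv::Mat_<double>(4, 1) << 0.5, 0.2, 3.0, 1.0);  // homogeneous world point
    cv::Mat x = P * Xw;                                           // 3x1 homogeneous pixel
    cv::Point2d viaP(x.at<double>(0) / x.at<double>(2),
                     x.at<double>(1) / x.at<double>(2));

    std::vector<cv::Point3f> obj{ cv::Point3f(0.5f, 0.2f, 3.0f) };
    std::vector<cv::Point2f> img;
    cv::projectPoints(obj, rvec, tvec, cameraMatrix, cv::noArray(), img);
    // viaP and img[0] only agree if P uses world-to-camera extrinsics.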

void upDatePMatrix(cv::Mat rvec, cv::Mat tvec, cv::Mat &P1return, cv::Mat &P2return)
{

    //extrinsics
    double tx = 0.118; // STEREO BASELINE, to offset the right cam from the left.


    cv::Mat tvec2 = cv::Mat(tvec.rows, tvec.cols, tvec.type());

    tvec2.at<double>(0) = tvec.at<double>(0) + tx;
    tvec2.at<double>(1) = tvec.at<double>(1);
    tvec2.at<double>(2) = tvec.at<double>(2);


    cv::Mat P1 = computeProjMat(cameraMatrix, rvec, tvec);
    cv::Mat P2 = computeProjMat(cameraMatrix, rvec, tvec2);


    P1.copyTo(P1return);
    P2.copyTo(P2return);


}
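For comparison, when the world frame is pinned to the left camera of a rectified pair, the projection matrices reduce to the textbook form below (a sketch; the sign of the baseline term depends on the calibration convention):

    double baseline = 0.118;                                   // same units as tvec
    cv::Mat Pleft = cameraMatrix * cv::Mat::eye(3, 4, CV_64F); // K * [I | 0]
    cv::Mat RT2 = cv::Mat::eye(3, 4, CV_64F);
    RT2.at<double>(0, 3) = -baseline;                          // right cam offset along x
    cv::Mat Pright = cameraMatrix * RT2;                       // K * [I | t]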


std::vector<cv::Point2f> CvPnpRansac(std::vector<cv::KeyPoint> imagePoints, std::vector<cv::Point3f> objectPoints, cv::Mat cameraMatrix, cv::Mat distCoeffs, cv::Mat &rvecRturn, cv::Mat &tvecRTurn)
{

    std::vector<cv::Point2f> points2d;
    std::vector<cv::Point3f> points3d;
    for (size_t i = 0; i < objectPoints.size(); i++)
    {
        cv::Point2f tmp;
        tmp.x = imagePoints[i].pt.x;
        tmp.y = imagePoints[i].pt.y;
        points2d.push_back(tmp);
        points3d.push_back(objectPoints[i]);
    }


    bool useExtrinsicGuess = false;
    int iterationsCount = 200;
    float reprojectionError = 1.0f;
    double confidence = 0.9;

    cv::Mat rvec, tvec; // initial values are ignored since useExtrinsicGuess is false
    bool success = cv::solvePnPRansac(points3d, points2d, cameraMatrix, distCoeffs, rvec, tvec, useExtrinsicGuess, iterationsCount, reprojectionError, confidence, cv::noArray(), cv::SOLVEPNP_ITERATIVE);


    std::vector<cv::Point2f> projectedPoints;
    cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projectedPoints);


    rvec.copyTo(rvecRturn);
    tvec.copyTo(tvecRTurn);


    return projectedPoints;

}
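To put a number on "the reprojected points look good", a mean reprojection error over points2d and projectedPoints can be computed just before the return (sketch):

    double meanErr = 0.0;
    for (size_t i = 0; i < points2d.size(); i++)
    {
        cv::Point2f d = points2d[i] - projectedPoints[i];
        meanErr += std::sqrt(static_cast<double>(d.x * d.x + d.y * d.y));
    }
    meanErr /= static_cast<double>(points2d.size());
    std::cout << "mean reprojection error: " << meanErr << " px" << std::endl;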





std::vector<cv::Point3f> triangulateCv(std::vector<cv::KeyPoint> keyPntsLeft, std::vector<cv::KeyPoint> keyPntsRight, cv::Mat P1, cv::Mat P2)
{
    std::vector<cv::Point3f> p3d;

    std::vector<cv::Point2f> keysLeft;
    std::vector<cv::Point2f> keysRight;

    for (size_t i = 0; i < keyPntsLeft.size(); i++)
    {
        keysLeft.push_back(keyPntsLeft[i].pt);
        keysRight.push_back(keyPntsRight[i].pt);
    }

    cv::Mat pointmatrix(keysLeft);
    cv::Mat pointmatrixR(keysRight);

    cv::Mat points3DHomogeneous;

    cv::triangulatePoints(P1, P2, pointmatrix, pointmatrixR, points3DHomogeneous);

    for (int i = 0; i < points3DHomogeneous.cols; ++i) {

        cv::Point3d point = toInHomo(cv::Vec4d(points3DHomogeneous.col(i)));
        p3d.push_back(point);
    }   

    return p3d;

}
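(toInHomo is not shown above; it is just the homogeneous divide, roughly:)

    cv::Point3d toInHomo(const cv::Vec4d &X)
    {
        // divide out the homogeneous w component
        return cv::Point3d(X[0] / X[3], X[1] / X[3], X[2] / X[3]);
    }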

EDIT: I have also attempted to use findEssentialMat and recoverPose to create the P matrices, as in the following function, but this gives even more erratic results.

bool bF = false; // set once the first frame has been processed
cv::Mat Utilities::returnProjectionMatrix(std::vector<cv::KeyPoint> kPrev, std::vector<cv::KeyPoint> kCurr, std::vector<cv::DMatch> matches, cv::Mat cameraMatrix,
    cv::Mat &P1, cv::Mat &P2)
{

    double FOCAL_LENGTH = cameraMatrix.at<double>(0, 0);
    double cx = cameraMatrix.at<double>(0, 2);
    double cy = cameraMatrix.at<double>(1, 2);

    cv::Point2d pp(cx, cy);

    std::vector<cv::Point2f> src, dst;
    for (std::vector<cv::DMatch>::const_iterator it = matches.begin();
        it != matches.end(); ++it)
    {
        //left keypoints
        float x = kPrev[it->queryIdx].pt.x;
        float y = kPrev[it->queryIdx].pt.y;
        src.push_back(cv::Point2f(x, y));
        //right keypoints
        x = kCurr[it->trainIdx].pt.x;
        y = kCurr[it->trainIdx].pt.y;
        dst.push_back(cv::Point2f(x, y));
    }

    cv::Mat mask;

    // NOTE: pose from dst to src
    cv::Mat E = findEssentialMat(dst, src, FOCAL_LENGTH, pp, cv::RANSAC, 0.999, 1.0, mask);
    cv::Mat local_R, local_t;


    recoverPose(E, dst, src, local_R, local_t, FOCAL_LENGTH, pp, mask);

    // local transform BETWEEN FRAMES, NOT GLOBAL
    cv::Mat T = cv::Mat::eye(4, 4, CV_64F);
    local_R.copyTo(T(cv::Range(0, 3), cv::Range(0, 3)));
    local_t.copyTo(T(cv::Range(0, 3), cv::Range(3, 4)));

    // accumulate transform (seed on the first frame, multiply afterwards)
    if (bF == false)
    {
        T.copyTo(currentTransform);
        bF = true;
    }
    else
    {
        currentTransform = currentTransform*T;
    }

    // make projection matrix
    cv::Mat R = currentTransform(cv::Range(0, 3), cv::Range(0, 3));
    cv::Mat t = currentTransform(cv::Range(0, 3), cv::Range(3, 4));

    std::cout << t << std::endl;

    cv::Mat P(3, 4, CV_64F);

    P(cv::Range(0, 3), cv::Range(0, 3)) = R.t();
    P(cv::Range(0, 3), cv::Range(3, 4)) = -R.t()*t;
    P = cameraMatrix*P;

    P.copyTo(P1);


    //create right P matrix
    //baseline
    cv::Mat currentTransformR;
    currentTransform.copyTo(currentTransformR);
    float tx = 0.118;
    currentTransformR.at<double>(0, 3) = currentTransformR.at<double>(0, 3) + tx;


    cv::Mat t2 = currentTransformR(cv::Range(0, 3), cv::Range(3, 4));

    cv::Mat Ptwo(3, 4, CV_64F);

    Ptwo(cv::Range(0, 3), cv::Range(0, 3)) = R.t();
    Ptwo(cv::Range(0, 3), cv::Range(3, 4)) = -R.t()*t2;
    Ptwo = cameraMatrix*Ptwo;

    Ptwo.copyTo(P2);

    return E;
}
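One caveat I am aware of with this approach: recoverPose only returns the direction of the translation (unit length), so the accumulated transform has no metric scale unless each local_t is rescaled first, e.g. (sketch; the scale value is an assumption):

    double scale = 0.118;      // e.g. metric scale inferred from the stereo baseline
    local_t = local_t * scale; // recoverPose gives |local_t| == 1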

Where am I going wrong here? How can I triangulate each frame's points into the same world-space? I have attached some demo data, the rvec and tvec values I get, the Left and Right points that I am triangulating, and my resulting 3d points, saved every ten frames for seventy frames.

http://s000.tinyupload.com/index.php?file_id=92785130482221661309

Thank you!

EDIT:

I have also tried removing the P matrix update, and adjusted my triangulation function to the following, to flip the points that way. This makes the reprojected points jump around, and the point clouds in 3d are miles apart from frame to frame.

std::vector<cv::Point3f> Triangulation::triangulateCv(std::vector<cv::KeyPoint> keyPntsLeft, std::vector<cv::KeyPoint> keyPntsRight, cv::Mat P1, cv::Mat P2, cv::Mat rvec, cv::Mat tvec)
{
    std::vector<cv::Point3f> p3d;

    std::vector<cv::Point2f> keysLeft;
    std::vector<cv::Point2f> keysRight;

    for (size_t i = 0; i < keyPntsLeft.size(); i++)
    {
        keysLeft.push_back(keyPntsLeft[i].pt);
        keysRight.push_back(keyPntsRight[i].pt);
    }


    cv::Mat pointmatrix(keysLeft);
    cv::Mat pointmatrixR(keysRight);

    cv::Mat points3DHomogeneous;

    cv::triangulatePoints(P1, P2, pointmatrix, pointmatrixR, points3DHomogeneous);

    //test flip: rebuild R from the current solvePnP rvec
    cv::Mat R;
    cv::Rodrigues(rvec, R);

    // build the camera pose once; it is the same for every point
    cv::Mat Tran = cv::Mat::eye(4, 4, CV_64F); // CV_64F to match R and tvec
    R.copyTo(Tran(cv::Range(0, 3), cv::Range(0, 3)));
    tvec.copyTo(Tran(cv::Range(0, 3), cv::Range(3, 4)));

    for (int p = 0; p < points3DHomogeneous.cols; ++p)
    {
        cv::Mat point;
        points3DHomogeneous.col(p).convertTo(point, CV_64F); // match Tran's type
        point = Tran.inv() * point;

        cv::Point3d pointFlip = toInHomo(cv::Vec4d(point));
        p3d.push_back(pointFlip);

    }

    return p3d;

}
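For reference, this version is then called with the current solvePnP pose while P1 and P2 stay fixed (sketch; `triangulation` is the hypothetical owning Triangulation instance):

    std::vector<cv::Point3f> objectPointsTri =
        triangulation.triangulateCv(keyPntsGoodL, keyPntsGoodR, P1, P2, rvec, tvec);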