Hi, thanks for the answers.

"Do you have the (relative) 3D position of your points?"

I don't know the (relative) 3D position of the points. I only know the two images, the focal length and the sensor size of the cameras.

I tested with Blender whether this is possible. Blender's motion tracking can compute the relative position and rotation of the two cameras. Blender needs at least 8 points in both images. The result is very accurate.

It is possible. But how?

I found the OpenCV function findFundamentalMat.

findFundamentalMat also requires at least 8 points* in both images, which is the same rule as in Blender.

*CV_FM_8POINT for an 8-point algorithm. N>=8

CV_FM_RANSAC for the RANSAC algorithm. N>=8

CV_FM_LMEDS for the LMedS algorithm. N>=8
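A minimal sketch of the call as I understand it (assuming image1_points and image2_points already hold the matching points, at least 8 pairs, as in my code further down; the threshold and confidence are just the documented defaults, not values I have tuned):

    // image1_points / image2_points: matching points in the same order, N >= 8
    cv::Mat F = cv::findFundamentalMat(image1_points, image2_points,
                                       cv::FM_RANSAC,   // or cv::FM_8POINT / cv::FM_LMEDS
                                       3.0,             // RANSAC reprojection threshold (pixels)
                                       0.99);           // confidence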

And I found the function stereoCalibrate.

----- UPDATE 2015.06.29 -----

Hi, thanks for the comments.

"Why 8 points? As LBerger said, every pair of points gives you a constraint of the form p2' * F * p1 = 0. Why you need 8 points is not that obvious."

The animation program Blender can compute the points and the position of the camera. Blender says that you need at least 8 points.
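For my own understanding, the constraint from the quoted comment can be checked directly on the point pairs once F is known (a small sketch, assuming F and the two point vectors are the ones from my code below):

    // check x2^T * F * x1 ~= 0 for every correspondence (the result should be close to zero)
    for (size_t i = 0; i < image1_points.size(); ++i)
    {
        cv::Mat x1 = (cv::Mat_<double>(3, 1) << image1_points[i].x, image1_points[i].y, 1.0);
        cv::Mat x2 = (cv::Mat_<double>(3, 1) << image2_points[i].x, image2_points[i].y, 1.0);
        double err = cv::Mat(x2.t() * F * x1).at<double>(0, 0);
        cout << "epipolar error for point " << i << ": " << err << endl;
    }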

I took a look at the function computeCorrespondEpilines. Here is the result. Image 2 looks very good.

Image 1: [image]

Image 2: [image]

I have copied the image into Blender. Here is the result:

[image]

But what happens next? How can I get the following values: X, Y, Z, roll, pitch and yaw?

[image]
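If I understand the theory correctly, the next step would be: build the camera matrix K from the focal length and the principal point, turn the fundamental matrix into the essential matrix with E = K^T * F * K, and decompose E into a rotation R and a translation direction t (OpenCV 3 has recoverPose for this, which I use in the update below). Note that the translation only comes out as a direction; the absolute scale of X, Y, Z cannot be recovered from two images alone. A rough sketch with placeholder intrinsics (fx, fy, cx, cy are assumed values, not calibrated ones):

    // camera matrix built from an assumed focal length (in pixels) and principal point
    double fx = 800.0, fy = 800.0, cx = 400.0, cy = 300.0;   // placeholder values
    cv::Mat K = (cv::Mat_<double>(3, 3) << fx, 0, cx,
                                            0, fy, cy,
                                            0,  0,  1);

    // essential matrix from the fundamental matrix: E = K^T * F * K
    cv::Mat E = K.t() * F * K;

    // decompose E into a rotation R and a translation direction t (OpenCV 3)
    cv::Mat R, t, inlier_mask;
    cv::recoverPose(E, image1_points, image2_points, R, t, fx, cv::Point2d(cx, cy), inlier_mask);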

Here is my code.

#include <QCoreApplication>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
#include <iostream>
using namespace cv;
using namespace std;


Mat image1;
Mat image2;
Mat  F;

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    // Open image 1
    QString fileName = "cam1c.jpg";   
   image1= imread(fileName.toAscii().data());

   // Open image 2
   QString fileName2 = "cam2c.jpg";
  image2= imread(fileName2.toAscii().data());



  // these are the corresponding points
  // I added the point values manually (this is a test)
  // Point2f keeps the sub-pixel coordinates (Point would truncate them to integers)
  vector<Point2f> image1_points;
  vector<Point2f> image2_points;
  image1_points.push_back(Point2f(403.83f, 299.63f));
  image2_points.push_back(Point2f(401.38f, 300.03f));

  image1_points.push_back(Point2f(311.5f, 388.5f));
  image2_points.push_back(Point2f(310.45f, 378.28f));

  image1_points.push_back(Point2f(741.9f, 72.08f));
  image2_points.push_back(Point2f(567.58f, 160.20f));

  image1_points.push_back(Point2f(488.45f, 211.58f));
  image2_points.push_back(Point2f(397.43f, 237.73f));

  image1_points.push_back(Point2f(250.6f, 200.43f));
  image2_points.push_back(Point2f(314.95f, 229.7f));

  image1_points.push_back(Point2f(171.0f, 529.08f));
  image2_points.push_back(Point2f(359.9f, 477.5f));

  image1_points.push_back(Point2f(400.0f, 227.75f));
  image2_points.push_back(Point2f(272.78f, 251.90f));

  image1_points.push_back(Point2f(513.95f, 414.0f));
  image2_points.push_back(Point2f(508.15f, 361.03f));

  image1_points.push_back(Point2f(280.68f, 140.9f));
  image2_points.push_back(Point2f(223.55f, 178.93f));

  image1_points.push_back(Point2f(479.58f, 220.48f));
  image2_points.push_back(Point2f(355.98f, 244.63f));

  image1_points.push_back(Point2f(621.95f, 122.48f));
  image2_points.push_back(Point2f(454.78f, 179.60f));

  image1_points.push_back(Point2f(293.93f, 406.13f));
  image2_points.push_back(Point2f(73.90f, 435.93f));

  image1_points.push_back(Point2f(570.98f, 72.1f));
  image2_points.push_back(Point2f(511.03f, 153.8f));

  image1_points.push_back(Point2f(44.88f, 122.43f));
  image2_points.push_back(Point2f(85.60f, 153.63f));


  // Draw the points (image1)
  for (int i=0; i<image1_points.size(); ++i)
  {
      float xValue = image1_points[i].x;
      float yValue = image1_points[i].y;
      circle(image1, Point(xValue,yValue), 11, Scalar(0,0,255), 2, 8 );
  }
  // Draw the points (image2)
  for (int i=0; i<image2_points.size(); ++i)
  {
      float xValue = image2_points[i].x;
      float yValue = image2_points[i].y;
      circle(image2, Point(xValue,yValue), 11, Scalar(0,0,255), 2, 8 );
  }

  // the fundamental matrix
  // note: the 4th and 5th arguments are the RANSAC reprojection threshold and the
  // confidence; 0 and 0 are probably not intended (the documented defaults are 3.0 and 0.99)
  cout << "findFundamentalMat FM_RANSAC, 0, 0" << endl;
  F = findFundamentalMat(Mat(image1_points), Mat(image2_points), FM_RANSAC, 0, 0);

  // show the values
  // note: findFundamentalMat returns a CV_64F matrix, so it has to be read with
  // at<double>; reading it with at<float> produced garbage values like 1.33879e+25
  cout << F << endl;



 // computeCorrespondEpilines image 1
 vector<Vec3f> epilines_image1;
 computeCorrespondEpilines(image1_points,1,F,epilines_image1);
 // RESULT
 /*
 [-0.0857736, -0.996315, 332.137]
 [-0.16384, -0.986487, 423.041]
 [0.112013, -0.993707, 95.4816]
 [-0.00812146, -0.999967, 240.303]
 [0.00183964, -0.999998, 228.422]
 [-0.283169, -0.95907, 559.134]
 [-0.0221135, -0.999755, 256.953]
 [-0.186857, -0.982387, 449.565]
 [0.0541392, -0.998533, 165.666]
 [-0.0160421, -0.999871, 249.734]
 [0.069402, -0.997589, 147.232]
 [-0.179446, -0.983768, 441.039]
 [0.112103, -0.993697, 95.3717]
 [0.069851, -0.997557, 146.689]
 */

 // draw the lines in image 2
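 // each epiline (a,b,c) satisfies a*x + b*y + c = 0, so y = -(c + a*x)/b;
 // the two endpoints below are taken at x = 0 and x = image2.cols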
 cout << "values"  <<endl;
 for (vector<Vec3f>::const_iterator it= epilines_image1.begin(); it!=epilines_image1.end(); ++it)
{
     cout << (*it) <<endl;
     line(image2,cv::Point(0,-(*it)[2]/(*it)[1]),cv::Point(image2.cols,-((*it)[2]+(*it)[0]*image2.cols)/(*it)[1]),cv::Scalar(0,0,255));
 }

// computeCorrespondEpilines image 2
vector<Vec3f> epilines_image2;
 computeCorrespondEpilines(image2_points,2,F,epilines_image2);
 // RESULT
 /*
 [0.00191114, 0.999998, -301.403]
 [0.00240657, 0.999997, -389.559]
 [0.000624515, 1, -72.4628]
 [0.00140625, 0.999999, -211.562]
 [0.00134315, 0.999999, -200.336]
 [0.00319328, 0.999995, -529.543]
 [0.00149637, 0.999999, -227.598]
 [0.00255128, 0.999997, -415.307]
 [0.00100565, 1, -140.282]
 [0.00145705, 0.999999, -220.602]
 [0.000904554, 1, -122.292]
 [0.00250309, 0.999997, -406.732]
 [0.000617913, 1, -71.288]
 [0.000903133, 1, -122.04]
 */

 // draw the lines in image 1
 cout << "values"  <<endl;
 for (vector<Vec3f>::const_iterator it2= epilines_image2.begin(); it2!=epilines_image2.end(); ++it2)
{
     cout << (*it2) <<endl;
     line(image1,cv::Point(0,-(*it2)[2]/(*it2)[1]),cv::Point(image1.cols,-((*it2)[2]+(*it2)[0]*image1.cols)/(*it2)[1]),cv::Scalar(0,0,255));
 }


  // show the images
  namedWindow("Image 1");
  imshow("Image 1", image1);
  namedWindow("Image 2");
  imshow("Image 2", image2);

  // save the images
  std::vector<int> qualityType;
  qualityType.push_back(CV_IMWRITE_JPEG_QUALITY);
  qualityType.push_back(90);
  imwrite("epilines1.jpg",image1,qualityType);
  imwrite("epilines2.jpg",image2,qualityType);
    return a.exec();
}

----- UPDATE 2015.07.06 -----

Thanks for the reply.

Excuse me, my last math lesson was 15 years ago.

@Eduardo I have the essential matrix.

I switched to OpenCV 3.

The function recoverPose gives the rotation and the translation:

Rotation

[0.9859449185096264, 0.167070695061541, -2.273102883446428e-05; -0.1670706961854756, 0.9859449165662832, -6.303336618253666e-05; 1.18805140270606e-05, 6.594511589667493e-05, 0.9999999977550477]

Translation

[-0.9954893808642969, 0.09487302832690769, -3.290137744252166e-05]
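(As far as I understand it, the translation returned by recoverPose is only a unit direction vector; the absolute scale cannot be recovered from two images alone.)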

The function "RQDecomp3x3" should give the values Roll, Pitch and Yaw. But the function returns three Matrixs.

Qx

[1, 0, 0; 0, 0.9999999980133975, 6.303336619882133e-05; 0, -6.303336619882133e-05, 0.9999999980133975]

Qy

[0.9999999997416501, 0, 2.273102883446428e-05; 0, 1, 0; -2.273102883446428e-05, 0, 0.9999999997416501]

Qz

[0.985944918764345, -0.1670706951047037, 0; 0.1670706951047037, 0.985944918764345, 0; 0, 0, 1]

How do I get to the values roll, pitch and yaw?

I've tried the following:

float xAngle = (atan2f(R.at<float>(2, 1), R.at<float>(2, 2)))/0.0174527777778;

 float yAngle = (atan2f(-R.at<float>(2, 0), sqrtf(R.at<float>(2, 1) * R.at<float>(2, 1) + R.at<float>(2, 2) *   R.at<float>(2, 2))))/0.0174527777778;

float zAngle = (atan2f(R.at<float>(1, 0), R.at<float>(0,0)))/0.0174527777778;

But the values are incorrect.

These should be the correct values: roll 5°, pitch 0°, yaw -45°.

Is there a function that outputs the values xAngle, yAngle and zAngle?
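Two things I noticed while debugging, so please treat this only as a sketch: the matrix R returned by recoverPose is CV_64F, so it has to be read with at<double> instead of at<float>, and (if I read the documentation right) the Vec3d returned by RQDecomp3x3 already contains the three Euler angles in degrees. The atan2 version would then look roughly like this (R, mtxR and mtxQ are the variables from my code below):

    // R is CV_64F (double); reading it with at<float> returns garbage
    double rad2deg = 180.0 / CV_PI;
    double xAngle = atan2(R.at<double>(2, 1), R.at<double>(2, 2)) * rad2deg;
    double yAngle = atan2(-R.at<double>(2, 0),
                          sqrt(R.at<double>(2, 1) * R.at<double>(2, 1)
                             + R.at<double>(2, 2) * R.at<double>(2, 2))) * rad2deg;
    double zAngle = atan2(R.at<double>(1, 0), R.at<double>(0, 0)) * rad2deg;

    // alternative: the return value of RQDecomp3x3 is already the (x, y, z) rotation angles in degrees
    cv::Vec3d euler = cv::RQDecomp3x3(R, mtxR, mtxQ);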

Here is my code.

#include <QCoreApplication>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
#include <iostream>

using namespace cv;
using namespace std;
Mat image1;
Mat image2;
Mat  E;

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    image1= cv::imread("cam1c.jpg");
    image2= cv::imread("cam2c.jpg");

      // these are the corresponding points
      // I added the point values manually (this is a test)
      // Point2f keeps the sub-pixel coordinates (Point would truncate them to integers)
      vector<Point2f> image1_points;
      vector<Point2f> image2_points;
      vector<Point2d> principle_point;
      principle_point.push_back(Point2d(0, 0));   // (not used below)

      image1_points.push_back(Point2f(403.83f, 299.63f));
      image2_points.push_back(Point2f(401.38f, 300.03f));

      image1_points.push_back(Point2f(311.5f, 388.5f));
      image2_points.push_back(Point2f(310.45f, 378.28f));

      image1_points.push_back(Point2f(741.9f, 72.08f));
      image2_points.push_back(Point2f(567.58f, 160.20f));

      image1_points.push_back(Point2f(488.45f, 211.58f));
      image2_points.push_back(Point2f(397.43f, 237.73f));

      image1_points.push_back(Point2f(250.6f, 200.43f));
      image2_points.push_back(Point2f(314.95f, 229.7f));

      image1_points.push_back(Point2f(171.0f, 529.08f));
      image2_points.push_back(Point2f(359.9f, 477.5f));

      image1_points.push_back(Point2f(400.0f, 227.75f));
      image2_points.push_back(Point2f(272.78f, 251.90f));

      image1_points.push_back(Point2f(513.95f, 414.0f));
      image2_points.push_back(Point2f(508.15f, 361.03f));

      image1_points.push_back(Point2f(280.68f, 140.9f));
      image2_points.push_back(Point2f(223.55f, 178.93f));

      image1_points.push_back(Point2f(479.58f, 220.48f));
      image2_points.push_back(Point2f(355.98f, 244.63f));

      image1_points.push_back(Point2f(621.95f, 122.48f));
      image2_points.push_back(Point2f(454.78f, 179.60f));

      image1_points.push_back(Point2f(293.93f, 406.13f));
      image2_points.push_back(Point2f(73.90f, 435.93f));

      image1_points.push_back(Point2f(570.98f, 72.1f));
      image2_points.push_back(Point2f(511.03f, 153.8f));

      image1_points.push_back(Point2f(44.88f, 122.43f));
      image2_points.push_back(Point2f(85.60f, 153.63f));

      // note: findEssentialMat expects the focal length in pixels (the same unit as the
      // principal point), so a value in metres like 0.0286 is probably not what it expects
      double focal = 0.0286;
      cv::Point2d pp(400.0, 300.0);
      Mat R, t2, mask;

      E = cv::findEssentialMat(image1_points, image2_points, focal, pp, RANSAC, 0.99, 1.0, mask);

      Mat R1;
      Mat R2;
      Mat t;

      recoverPose(E, image1_points, image2_points, R, t2, focal, pp, mask);

      cout <<  R.t()<<endl;
      //RESULT
      //[0.9859449185096264, 0.167070695061541, -2.273102883446428e-05;
      // -0.1670706961854756, 0.9859449165662832, -6.303336618253666e-05;
      // 1.18805140270606e-05, 6.594511589667493e-05, 0.9999999977550477]

      cout <<  t2.t()<<endl;
      //RESULT
      //[-0.9954893808642969, 0.09487302832690769, -3.290137744252166e-05]


      Mat mtxR,mtxQ;
      Mat Qx,Qy,Qz;
      Point3d e;

      e= RQDecomp3x3(R, mtxR,mtxQ,Qx, Qy, Qz);
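      // note: the Vec3d returned by RQDecomp3x3 (stored in e) already contains the Euler angles in degrees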

      cout <<  Qx.t()<<endl;
      //RESULT
      //[1, 0, 0;
      // 0, 0.9999999980133975, 6.303336619882133e-05;
      // 0, -6.303336619882133e-05, 0.9999999980133975]


      cout <<  Qy.t()<<endl;
      //RESULT
      //[0.9999999997416501, 0, 2.273102883446428e-05;
      //0, 1, 0;
      //-2.273102883446428e-05, 0, 0.9999999997416501]


      cout <<  Qz.t()<<endl;
      //RESULT
      //[0.985944918764345, -0.1670706951047037, 0;
      //0.1670706951047037, 0.985944918764345, 0;
      //0, 0, 1]

      cout <<  e<<endl;
      //RESULT
      //[-0.00361155, 0.00130239, 9.61755]



    return a.exec();
}