# Revision history [back]

### Converting a 2D image point to a 3D world point

I'm developing an application for iOS. I'm using the camera matrix according to the book Mastering OpenCV. In my scenario I have a well-known box: I know its real dimensions and I know exactly its corners' pixel coordinates. Using this information I calculate the camera rotation and the translation vector. From these parameters I'm able to calculate the camera position. I check my calculation by projecting the 3D world coordinates back to the image, and I get very accurate results.

The world origin in my case is the middle of the bottom line of the box. The box is open from one side. The image is taken in that direction, so I can see the content of the box.

Now, I have an object in the box. I know the image coordinates (2D) of the corners of this object, and I know the real height of these corners (the real Y, where Y ≠ 0). How do I calculate the world X and Z of the corners of the object?

In this question in the answer by bjoernz he said: "All you can do with the matrices that you have, is to transform a 2D pixel into a 3D line where every point on this line would be projected onto the same 2D pixel.

You will definitely need additional information to reconstruct a 3D point."

I have the real Y (height of the object (40mm)). Also my reference object (the Box) is in the same image.

Here is my code:

# include <ctype.h>

using namespace cv; using namespace std;

Point2f point; vector<vector<point2f>> objectPoints(1); vector<vector<point2f>> boxPoints(1);

Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat) { Point3f tmpPoint;

// This fiunction I need to complete
return tmpPoint; }


int main( int argc, char** argv ) {

///////// Loading image


Test/images/box_center640X480.jpg");

namedWindow( "Source", 1 );

///// Setting box corners /////
point = Point2f((float)102,(float)367.5);


//640X480 boxPoints.push_back(point); circle( sourceImage, boxPoints, 3, Scalar(0,255,0), -1, 8);

point = Point2f((float)83,(float)90.5);


//640X480 boxPoints.push_back(point); circle( sourceImage, boxPoints, 3, Scalar(0,255,0), -1, 8);

point = Point2f((float)520,(float)82.5);


//640X480 boxPoints.push_back(point); circle( sourceImage, boxPoints, 3, Scalar(0,255,0), -1, 8);

point = Point2f((float)510.5,(float)361);


//640X480 boxPoints.push_back(point); circle( sourceImage, boxPoints, 3, Scalar(0,255,0), -1, 8);

///// Setting object corners /////
point = Point2f((float)403.5,(float)250);


//640X480 objectPoints.push_back(point); circle( sourceImage, objectPoints, 3, Scalar(0,255,0), -1, 8);

point = Point2f((float)426.5,(float)251.5);


//640X480 objectPoints.push_back(point); circle( sourceImage, objectPoints, 3, Scalar(0,255,0), -1, 8);

imshow("Source", sourceImage);

vector<vector<Point3f>> worldBoxPoints(1);
Point3f tmpPoint;

tmpPoint = Point3f((float)-100,(float)0,(float)0);
worldBoxPoints.push_back(tmpPoint);
tmpPoint = Point3f((float)-100,(float)-150,(float)0);
worldBoxPoints.push_back(tmpPoint);
tmpPoint = Point3f((float)100,(float)-150,(float)0);
worldBoxPoints.push_back(tmpPoint);
tmpPoint = Point3f((float)100,(float)0,(float)0);
worldBoxPoints.push_back(tmpPoint);

std::cout << "There are " << boxPoints.size() << " roomPoints


and " << worldBoxPoints.size() << " worldRoomPoints." << std::endl;

cv::Mat cameraMatrix1(3,3,cv::DataType<double>::type);
cv::setIdentity(cameraMatrix1);

cv::Mat distCoeffs1(4,1,cv::DataType<double>::type);
distCoeffs1.at<double>(0) = 0;
distCoeffs1.at<double>(1) = 0;
distCoeffs1.at<double>(2) = 0;
distCoeffs1.at<double>(3) = 0;

//Taken from Mastring OpenCV
double fx = 6.24860291e+02 * ((float)(sourceImage.cols)/352.);
double fy = 6.24860291e+02 * ((float)(sourceImage.rows)/288.);
double cx = (float)(sourceImage.cols)/2.;
double cy = (float)(sourceImage.rows)/2.;

cameraMatrix1.at<double>(0, 0) = fx;
cameraMatrix1.at<double>(1, 1) = fy;
cameraMatrix1.at<double>(0, 2) = cx;
cameraMatrix1.at<double>(1, 2) = cy;

std::cout << "After calib cameraMatrix --- 1: " << cameraMatrix1


<< std::endl; std::cout << "After calib distCoeffs: --- 1" << distCoeffs1 << std::endl;

cv::Mat rvec1(3,1,cv::DataType<double>::type);
cv::Mat tvec1(3,1,cv::DataType<double>::type);

cv::solvePnP(worldBoxPoints, boxPoints, cameraMatrix1,


distCoeffs1, rvec1, tvec1);

std::cout << "rvec --- 1: " << rvec1 << std::endl;
std::cout << "tvec --- 1: " << tvec1 << std::endl;

cv::Mat rvecM1(3,3,cv::DataType<double>::type);
cv::Rodrigues(rvec1,rvecM1);

std::cout << "cameraRotation --- 1 : " << rvecM1 << std::endl;
std::cout << "cameraPosition --- 1 : " << (rvecM1.t())*((-1.0)*tvec1) <<


std::endl;

std::vector<cv::Point2f> projectedPoints1;
cv::projectPoints(worldBoxPoints,


rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);

for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
{
std::cout << "box point --- 1: " << boxPoints[i] << " Projected to


--- 1: " << projectedPoints1[i] << std::endl; }

vector<vector<Point3f>> worldObjectPoints(1);

tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints.x,


objectPoints.y, /the real Y of the object/ -40.0, fx, fy, cx, cy, tvec1, rvecM1); worldObjectPoints.push_back(tmpPoint);

tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints.x,


objectPoints.y, /the real Y of the object/ -40.0, fx, fy, cx, cy, tvec1, rvecM1); worldObjectPoints.push_back(tmpPoint);

cv::projectPoints(worldObjectPoints,


rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1); for(unsigned int i = 0; i < projectedPoints1.size(); ++i) { std::cout << "object point --- 1: " << objectPoints[i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl; }

waitKey(0);

return 0; }


So, I want to implement the calc3DPointOutOf2DwithYknown function. Of course the parameters are according to what I understand now. If I need other parameters I'll use others.

Thank you so much, Ilan