I adapted the code from this interesting post as follows (for a single point only):
// Each image point contributes two rows to A: one for the x equation, one for the y equation.
Mat A(4, 3, CV_32FC1);
// Rows 0-1: first view (u, P)
A.at<float>(0,0) = u.at<float>(0,0)*P.at<float>(2,0) - P.at<float>(0,0);
A.at<float>(0,1) = u.at<float>(0,0)*P.at<float>(2,1) - P.at<float>(0,1);
A.at<float>(0,2) = u.at<float>(0,0)*P.at<float>(2,2) - P.at<float>(0,2);
A.at<float>(1,0) = u.at<float>(1,0)*P.at<float>(2,0) - P.at<float>(1,0);
A.at<float>(1,1) = u.at<float>(1,0)*P.at<float>(2,1) - P.at<float>(1,1);
A.at<float>(1,2) = u.at<float>(1,0)*P.at<float>(2,2) - P.at<float>(1,2);
// Rows 2-3: second view (u1, P1)
A.at<float>(2,0) = u1.at<float>(0,0)*P1.at<float>(2,0) - P1.at<float>(0,0);
A.at<float>(2,1) = u1.at<float>(0,0)*P1.at<float>(2,1) - P1.at<float>(0,1);
A.at<float>(2,2) = u1.at<float>(0,0)*P1.at<float>(2,2) - P1.at<float>(0,2);
A.at<float>(3,0) = u1.at<float>(1,0)*P1.at<float>(2,0) - P1.at<float>(1,0);
A.at<float>(3,1) = u1.at<float>(1,0)*P1.at<float>(2,1) - P1.at<float>(1,1);
A.at<float>(3,2) = u1.at<float>(1,0)*P1.at<float>(2,2) - P1.at<float>(1,2);
// B holds the known terms (fourth column of each projection matrix), moved to the right-hand side.
Mat B(4, 1, CV_32FC1);
B.at<float>(0,0) = -(u.at<float>(0,0)*P.at<float>(2,3) - P.at<float>(0,3));
B.at<float>(1,0) = -(u.at<float>(1,0)*P.at<float>(2,3) - P.at<float>(1,3));
B.at<float>(2,0) = -(u1.at<float>(0,0)*P1.at<float>(2,3) - P1.at<float>(0,3));
B.at<float>(3,0) = -(u1.at<float>(1,0)*P1.at<float>(2,3) - P1.at<float>(1,3));
// Least-squares solution of A*X = B via SVD
Mat X;
solve(A, B, X, DECOMP_SVD);
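For clarity, the system comes from the pinhole projection of the unknown world point (X, Y, Z): for an image point u = (x, y) and a 3x4 projection matrix P,

x (P_{31} X + P_{32} Y + P_{33} Z + P_{34}) = P_{11} X + P_{12} Y + P_{13} Z + P_{14}
y (P_{31} X + P_{32} Y + P_{33} Z + P_{34}) = P_{21} X + P_{22} Y + P_{23} Z + P_{24}

Collecting the unknowns on the left gives two rows of A, and the known P_{i4} terms move to B; the second view contributes the remaining two rows.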
Here u and u1 are the matched points in the first and second image respectively, and P and P1 are the projection matrices obtained by multiplying C*[R|t], where C is the matrix of the intrinsic parameters of the camera and R, t are the rotation and translation describing the camera pose.
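For reference, this is how such a projection matrix can be assembled in OpenCV (a minimal sketch; it assumes C is 3x3, R is 3x3 and t is 3x1, all CV_32FC1, for the corresponding view):

Mat Rt;                 // 3x4 pose matrix [R|t]
hconcat(R, t, Rt);      // stack rotation and translation side by side
Mat P = C * Rt;         // intrinsics times pose -> 3x4 projection matrix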
The problem is that the result I get, the coordinates of the point in the world frame, is not correct. There is no overflow or anything similar, it is simply wrong: for example, I get a depth of 35 m instead of 2 m, and if I move the camera closer to the 3D feature, the computed depth increases. What am I doing wrong?

P.S. The camera is already calibrated and the features are extracted from a rectified image.
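To illustrate how I check the result, I reproject the estimated point through a projection matrix and compare it with the measured one (a minimal sketch, assuming X is the 3x1 result of the solve above):

// Reproject the triangulated point through the first view (sketch)
Mat Xh = (Mat_<float>(4,1) << X.at<float>(0,0), X.at<float>(1,0), X.at<float>(2,0), 1.f);
Mat p = P * Xh;                              // homogeneous image point, 3x1
float px = p.at<float>(0,0) / p.at<float>(2,0);
float py = p.at<float>(1,0) / p.at<float>(2,0);
// (px, py) should be close to (u.at<float>(0,0), u.at<float>(1,0))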