calibration: DLT gives smaller reprojection error than solvePnP
I'm trying to calibrate a Velodyne lidar to a camera. I picked corresponding points between the camera image and the lidar's range image, then estimated the extrinsics both with a direct linear transformation (DLT) and with cv2.solvePnP(). In theory, solvePnP should produce extrinsics with the lower reprojection error, but that doesn't seem to be the case:
> DEBUG:root:error: DLT: 100.053025398
> DEBUG:root:error: solvePNP: 163.847104258
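Both numbers are the same metric, computed identically in both functions below: the sum (not the mean) of per-point Euclidean pixel distances. A minimal standalone version of that metric, with names of my own choosing:

import numpy as np

def total_pixel_error(points2d, predicted2d):
    # sum of per-point Euclidean distances in pixels; this is the
    # "total" that both calibration functions below accumulate
    diff = np.asarray(points2d, float) - np.asarray(predicted2d, float)
    return np.sqrt((diff ** 2).sum(axis=1)).sum()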
Here is my code for calibration:
import numpy as np
import cv2
import logging
def with_intrinsic(points2d, points3d):
    # earlier experiment: swap the lidar axes before calibrating
    # tmp1 = []
    # for x, y, z in points3d:
    #     tmp1.append([x, -z, y])
    # points3d = np.array(tmp1)

    scaleX = 1
    scaleY = .5
    centerX = 784.646528
    centerY = 312.174112
    focal = 402.206240
    # hardcoded camera intrinsics
    cam1_intrinsic = np.array([
        [focal * scaleX, 0, centerX],
        [0, focal * scaleY, centerY],
        [0, 0, 1]
    ])
    cam1_K_inverse = np.linalg.inv(cam1_intrinsic)

    # direct linear transformation on normalized image coordinates
    # (K^-1 is applied to the pixels first, so M excludes the intrinsics)
    assert points2d.shape[0] >= 6  # 11 DOF, 2 equations per point -> at least 6 correspondences
    assert points3d.shape[0] == points2d.shape[0]

    A = []
    points2d_homo = []
    for u, v in points2d:
        points2d_homo.append([u, v, 1])
    points2d_homo = np.array(points2d_homo).T  # columns are data points
    points2d_inv = np.dot(cam1_K_inverse, points2d_homo).T
    assert points2d_inv.shape == (points2d.shape[0], 3)
    assert np.isclose(points2d_inv[0, 2], 1)  # homogeneous w should stay 1 after K^-1

    for idx in range(points2d.shape[0]):
        x3d, y3d, z3d = points3d[idx]
        u, v, _ = points2d_inv[idx]
        A.append([x3d, y3d, z3d, 1, 0, 0, 0, 0, -u * x3d, -u * y3d, -u * z3d, -u])
        A.append([0, 0, 0, 0, x3d, y3d, z3d, 1, -v * x3d, -v * y3d, -v * z3d, -v])
    A = np.array(A)

    # homogeneous least squares: the solution is the right singular
    # vector belonging to the smallest singular value
    U, D, VT = np.linalg.svd(A)
    M = VT.T[:, -1].reshape((3, 4))

    # reprojection error: sum of per-point Euclidean pixel distances
    total = 0.
    for i in range(points2d.shape[0]):
        u, v = points2d[i]
        x, y, z = points3d[i]
        h3d = np.array([[x], [y], [z], [1]])
        h2d = np.dot(np.dot(cam1_intrinsic, M), h3d)
        predicted_u = h2d[0, 0] / h2d[2, 0]
        predicted_v = h2d[1, 0] / h2d[2, 0]
        logging.debug("actual u: %s vs %s", u, predicted_u)
        logging.debug("actual v: %s vs %s", v, predicted_v)
        total += ((u - predicted_u) ** 2 + (v - predicted_v) ** 2) ** .5
    logging.debug("error: %s", total)
    return M
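# Side note, a sketch that is not used above: M is a general 3x4
# projective matrix (11 DOF, defined only up to scale), while solvePnP
# is restricted to a rigid transform (6 DOF). To compare like with
# like, the left 3x3 block of M can be snapped to the nearest rotation.
# Assumes M maps into normalized camera coordinates, as returned by
# with_intrinsic().
def dlt_to_rigid(M):
    U, D, VT = np.linalg.svd(M[:, :3])
    scale = D.mean()              # overall scale of the projective solution
    R = np.dot(U, VT)             # nearest orthonormal matrix (Frobenius norm)
    if np.linalg.det(R) < 0:      # enforce a proper rotation, det(R) = +1
        R, scale = -R, -scale
    t = M[:, 3] / scale
    return R, t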
def opencv_calibrate(points2d, points3d):
    scaleX = 1
    scaleY = .5
    centerX = 784.646528
    centerY = 312.174112
    focal = 402.206240
    # hardcoded camera intrinsics (same as in with_intrinsic)
    cam1_intrinsic = np.array([
        [focal * scaleX, 0, centerX],
        [0, focal * scaleY, centerY],
        [0, 0, 1]
    ])

    _, rvec, tvec = cv2.solvePnP(points3d.astype("float32"), points2d.astype("float32"),
                                 cameraMatrix=cam1_intrinsic, distCoeffs=None)
    R, J = cv2.Rodrigues(rvec)

    # same error metric as the DLT branch
    total = 0.
    for i in range(points2d.shape[0]):
        u, v = points2d[i]
        x, y, z = points3d[i]
        h3d = np.array([[x], [y], [z]])
        h2d = np.dot(cam1_intrinsic, np.dot(R, h3d) + tvec)
        predicted_u = h2d[0, 0] / h2d[2, 0]
        predicted_v = h2d[1, 0] / h2d[2, 0]
        logging.debug("opencv actual u: %s vs %s", u, predicted_u)
        logging.debug("opencv actual v: %s vs %s", v, predicted_v)
        total += ((u - predicted_u) ** 2 + (v - predicted_v) ** 2) ** .5
    logging.debug("error: %s", total)
    return rvec, tvec
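# Sanity check, a sketch that is not used above: cv2.projectPoints
# applies the same K[R|t] pinhole model, so its output should
# reproduce the manual loop's error.
def reprojection_error(points2d, points3d, rvec, tvec, K):
    projected, _ = cv2.projectPoints(points3d.astype("float32"), rvec, tvec, K, None)
    projected = projected.reshape(-1, 2)  # (N, 1, 2) -> (N, 2)
    return np.linalg.norm(points2d - projected, axis=1).sum()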
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    points2d = np.array([[157, 907 ...