Stereo rectification with dissimilar cameras

I have a stereo camera system made up of two dissimilar cameras: they have different focal lengths, optical center points, and image resolutions. The cameras are positioned side by side horizontally, and their relative rotation is negligible.

I've been given the intrinsic matrices for each camera, their distortion coefficients, as well as the rotation matrix and translation vector describing their relationship.

I want to rectify a pair of photos taken by the cameras at the same time. However, the results have been complete garbage.

I first tried ignoring that the image resolutions are different and running cv2.stereoRectify, then cv2.initUndistortRectifyMap, then cv2.remap. Since this didn't work, I added a preprocessing step that scales both images to the same dimensions. The algorithm is now:

  1. Remove distortion from each image using cv2.undistort
  2. Scale the images to the same width and height with cv2.resize
  3. Transform the focal lengths and optical center points of the camera matrices accordingly (per this answer; see the snippet after this list)
  4. Perform cv2.stereoRectify with the new camera matrices and zero distortion
  5. Compute the rectification map with cv2.initUndistortRectifyMap for each camera
  6. Apply the rectification map with cv2.remap on each image
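
To make step 3 concrete, my understanding is that resizing an image by factors (sx, sy) corresponds to pre-multiplying the camera matrix by diag(sx, sy, 1), so that the focal lengths and the principal point both get scaled. A minimal sketch with made-up intrinsics:

import numpy as np

# Made-up intrinsics, purely for illustration
K = np.array([[1000.0,    0.0, 640.0],
              [   0.0, 1000.0, 360.0],
              [   0.0,    0.0,   1.0]])

sx, sy = 1.5, 1.5              # resize factors: new_size / old_size
S = np.diag([sx, sy, 1.0])     # scaling transform

K_scaled = S.dot(K)            # (transform) * (camera), not (camera) * (transform)
# fx and cx are multiplied by sx, fy and cy by sy:
# [[1500.    0.  960.]
#  [   0. 1500.  540.]
#  [   0.    0.    1.]]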

However, the output is again garbage. I've re-read the code to make sure I didn't make any copy-paste errors, compared it with similar implementations, and consulted the relevant chapters of the "Learning OpenCV 3" book. I've also written out image files at each step to make sure the undistortion and scaling are correct.

Are there any sanity checks I can do to make sure that the camera matrices I'm receiving are correct?
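
For example, would something along these lines be enough to catch gross errors? (A rough sketch of what I mean; R, T, and the camera matrices are the calibration values I've been given, and the plausibility thresholds are just guesses.)

import numpy as np

def check_intrinsics(K, image_size, name):
    # Rough plausibility checks on an intrinsic matrix; the thresholds are guesses
    w, h = image_size
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    assert np.isclose(K[2, 2], 1.0), "%s: K[2,2] should be 1" % name
    assert 0 < cx < w and 0 < cy < h, "%s: principal point outside the image" % name
    assert 0.2 < fx / fy < 5.0, "%s: implausible fx/fy ratio" % name
    print("%s: fx=%.1f fy=%.1f cx=%.1f cy=%.1f" % (name, fx, fy, cx, cy))

check_intrinsics(left_camera_matrix, (left.shape[1], left.shape[0]), "left")
check_intrinsics(right_camera_matrix, (right.shape[1], right.shape[0]), "right")

# R should be a proper rotation: orthonormal with determinant +1
assert np.allclose(R.dot(R.T), np.eye(3), atol=1e-6), "R is not orthonormal"
assert np.isclose(np.linalg.det(R), 1.0), "det(R) is not +1"

# For a mostly horizontal rig, T should be dominated by its x component
print("baseline vector T:", T.ravel())

For reference, here is the full pipeline: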

import cv2
import numpy as np

# The left/right input images and the calibration data (camera matrices,
# distortion coefficients, R, T) are loaded elsewhere.

# Undistort the images without rectifying them
left_ud = cv2.undistort(left, left_camera_matrix, left_distortion_coeffs)
right_ud = cv2.undistort(right, right_camera_matrix, right_distortion_coeffs)

# Now scale the images to the same width and height
mw = max(left_ud.shape[1], right_ud.shape[1])
mh = max(left_ud.shape[0], right_ud.shape[0])

left_s = cv2.resize(left_ud, (mw, mh), interpolation=cv2.INTER_CUBIC)
right_s  = cv2.resize(right_ud, (mw, mh), interpolation=cv2.INTER_CUBIC)

# Adjust the camera matrices for the resize. Pre-multiply by the scaling
# transform, i.e. (transform) * (camera), so the principal point is scaled as well
left_camera_matrix = np.array([
  [float(mw) / left_ud.shape[1], 0, 0],
  [0, float(mh) / left_ud.shape[0], 0],
  [0, 0, 1]
]).dot(left_camera_matrix)
right_camera_matrix = np.array([
  [float(mw) / right_ud.shape[1], 0, 0],
  [0, float(mh) / right_ud.shape[0], 0],
  [0, 0, 1]
]).dot(right_camera_matrix)

# Clear the distortion coefficients, since the images are already undistorted
left_distortion_coeffs = right_distortion_coeffs = np.zeros((1, 5))


# Rectify both cameras
R1, R2, P1, P2, _, left_roi, right_roi = cv2.stereoRectify(
  left_camera_matrix, left_distortion_coeffs,
  right_camera_matrix, right_distortion_coeffs,
  (mw, mh),
  R, T,
  flags=cv2.CALIB_ZERO_DISPARITY,
  alpha=0.0  # tried different values here
)

# Now compute the rectification maps and apply them
map1, map2 = cv2.initUndistortRectifyMap(
  left_camera_matrix, left_distortion_coeffs,
  R1, P1[:,:3],
  (mw, mh),
  m1type = cv2.CV_32FC1
)

left_out = cv2.remap(left_s, map1, map2, cv2.INTER_LINEAR)
cv2.rectangle(left_out, (left_roi[0], left_roi[1]), (left_roi[0]+left_roi[2], left_roi[1]+left_roi[3]), (0, 255, 0), thickness=3)

map1, map2 = cv2.initUndistortRectifyMap(
  right_camera_matrix, right_distortion_coeffs,
  R2, P2[:,:3],
  (mw, mh),
  m1type = cv2.CV_32FC1
)

right_out = cv2.remap(right_s, map1, map2, cv2.INTER_LINEAR)
cv2.rectangle(right_out, (right_roi[0], right_roi[1]), (right_roi[0]+right_roi[2], right_roi[1]+right_roi[3]), (0, 255, 0), thickness=3)


# Now concatenate them
concat = np.zeros((mh, 2*mw, 3), dtype=np.uint8)  # 8-bit so cv2.imwrite saves it correctly
concat[:,:mw,:] = left_out
concat[:,mw:,:] = right_out

# Draw some horizontal lines
for y in range(0, mh, 60):
  cv2.line(concat, (0, y), (2*mw, y), (0, 0, 255))

cv2.imwrite('/tmp/rectified.png', concat)
