I am trying to run the code you see below based on this C++ code. The goal of this piece of code is to know the orientation of ones head and draw a line from his nose to some arbitrary plane. The function fails when trying to execute projectPoints(). When trying to run this code I get the following error and output:
nose end point: [ 0. 0. 1000.]
rotVector: [[-3.04843683]
[ 0.03318151]
[ 0.31324333]]
translVector: [[ 91.013357 ]
[ -4.95273482]
[ 401.22675029]]
camera matrix: [[ 320. 0. 320.]
[ 0. 320. 240.]
[ 0. 0. 1.]]
dist coeffs: [ 0. 0. 0. 0.]
noseEndPoint2d: [ 0. 0.]
OpenCV Error: Assertion failed (npoints >= 0 && (depth == CV_32F || depth == CV_64F)) in projectPoints, file /home/John/opencv2/opencv-2.4.11/modules/calib3d/src/calibration.cpp, line 3349
Traceback (most recent call last):
File "./myHeadOrientation.py", line 62, in <module>
cv2.projectPoints(noseEndPoint3d, rotVector, translVector, cameraMatrix, distCoeffs, noseEndPoint2d)
cv2.error: /home/John/opencv2/opencv-2.4.11/modules/calib3d/src/calibration.cpp:3349: error: (-215) npoints >= 0 && (depth == CV_32F || depth == CV_64F) in function projectPoints
This is my code:
#! /usr/bin/env python2
import cv2
import alignDlib
import openface
import numpy as np
def getModelPoints():
modelPoints = np.array([[0,0,0],[0, -330, -65],[-225, 170, -135],[225, 170, -135],[-150, -150, -125],[150, -150, -125]], dtype=np.float)
return modelPoints
if __name__=='__main__':
#camera setup
video_capture = cv2.VideoCapture(0)
video_capture.set(3, 640)
video_capture.set(4, 480)
#camera intrinsics
imgCols = 640
focalLength = imgCols/2
cameraMatrix = np.array([[focalLength, 0, 640/2],[0, focalLength, 480/2],[0, 0, 1]], dtype=np.float)
# distCoeffs = np.array([[0],[0],[0],[0]], dtype=np.float)
distCoeffs = np.array([0, 0, 0, 0], dtype=np.float)
rotVector = np.array([0], dtype=np.float)
translVector = np.array([0], dtype=np.float)
noseEndPoint3d = np.array([0, 0, 1000], dtype=np.float)
noseEndPoint2d = np.array([0, 0], dtype=np.float)
pathLandMartExtr = '../models/dlib/shape_predictor_68_face_landmarks.dat'
align = openface.AlignDlib(pathLandMartExtr)
while(True):
ret, frameRGB = video_capture.read()
bb = align.getLargestFaceBoundingBox(frameRGB)
landmarkLoc = align.findLandmarks(frameRGB, bb)
cv2.circle(frameRGB, landmarkLoc[30], 2, (0,0,255), 1,8,0) #nose
cv2.circle(frameRGB, landmarkLoc[8], 2, (0,0,255), 1,8,0) #chin
cv2.circle(frameRGB, landmarkLoc[36], 2, (0,0,255), 1,8,0) #left eye
cv2.circle(frameRGB, landmarkLoc[45], 2, (0,0,255), 1,8,0) #right eye
cv2.circle(frameRGB, landmarkLoc[48], 2, (0,0,255), 1,8,0) #left mouth corner
cv2.circle(frameRGB, landmarkLoc[54], 2, (0,0,255), 1,8,0) #right mouth corner
twoDImagePoints = np.array([ [landmarkLoc[30]], [landmarkLoc[8]], [landmarkLoc[36]],[landmarkLoc[45]], [landmarkLoc[48]], [landmarkLoc[54]]], dtype=np.float)
modelPoints = getModelPoints()
retval, rotVector, translVector = cv2.solvePnP(modelPoints, twoDImagePoints, cameraMatrix, distCoeffs, rotVector, translVector)
print('nose end point: '+ str(noseEndPoint3d))
print('rotVector: '+ str(rotVector))
print('translVector: '+ str(translVector))
print('camera matrix: ' + str(cameraMatrix))
print('dist coeffs: ' + str(distCoeffs))
print('noseEndPoint2d: ' +str(noseEndPoint2d))
cv2.projectPoints(noseEndPoint3d, rotVector, translVector, cameraMatrix, distCoeffs, noseEndPoint2d)
cv2.line(frameRGB, landmarkLoc[30], noseEndPoint2d, (255,0,0), 2)
cv2.imshow('videostream', frameRGB)
if cv2.waitKey(1) == ord('a'):
startShoot = True
video_capture.release()
cv2.destroyAllWindows()
I don't see what I am doing incorrectly. Could someone help me out on this one?