1 | initial version |
Hi,
thanks for trying to help. Here are the pics:
Now, below there are two examples of the rvecs. Interestingly, you can 'see' when it goes wrong by comparing the values of the correct vectors with the wrong one, as shown below... Both examples were taken from the exact same scene, one flipped, one normal.
Normal frame:
rvec[0]: [3.0265, 0.19776, -0.32671]
rvec[1]: [2.9941, 0.20444, -0.39449]
rvec[2]: [3.1457, 0.18338, -0.41779]
rvec[3]: [3.1319, 0.17826, -0.39777]
Another frame, same scene but z-axis flipped:
rvec[0]: [3.0238, 0.19807, -0.32165]
rvec[1]: [2.9897, 0.20444, -0.38867]
rvec[2]: [3.0786, 0.17836, -0.39851]
rvec[3]: [2.9951, 0.17127, 0.79585] // Wrong !!! Negative sign on 3rd element is missing ?
The relevant code follows here. The code is somewhat condensed — I stripped out the unnecessary parts — but there is nothing special, just normal pose estimation.
// Detect the four ArUco markers in the global 'image', estimate their poses,
// draw debug axis triads on a working copy, and write the annotated result
// back into 'image' (the on-screen buffer).
// NOTE(review): relies on file-level globals 'image', 'cameraMatrix',
// 'distCoeffs' and the comparators 'compare_y'/'compare_x1'/'compare_x2'
// declared elsewhere in this file.
void display() {
    std::vector< int > ids;
    std::vector< std::vector< Point2f > > corners, rejected;
    std::vector< Vec3d > rvecs, tvecs;
    Ptr<aruco::Dictionary> dictionary = aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
    // Use the C++ bool literal instead of the Objective-C 'YES' macro so this
    // also compiles as plain C++ (the field is a bool either way).
    detectorParams->doCornerRefinement = true;
    Mat image_copy;
    // variable 'image' contains the input image.
    // BUG FIX: 'cv:cvtColor' (single colon) declared a goto label named 'cv'
    // and called an unqualified cvtColor — use the scope operator 'cv::'.
    cv::cvtColor(image, image_copy, CV_BGRA2BGR);
    //Mat tmp = image_copy.clone();
    //undistort(tmp, image_copy, cameraMatrix, distCoeffs);
    // detect markers and estimate pose
    detectMarkers(image_copy, dictionary, corners, ids, detectorParams, rejected);
    if(ids.size() == 4) {
        // Bring the 4 markers into a defined order: tl, tr, br, bl.
        std::sort(corners.begin(), corners.end(), compare_y);
        std::sort(corners.begin(), corners.end()-2, compare_x1);
        std::sort(corners.begin()+2, corners.end(), compare_x2);
        // Estimate all the poses at once.
        rvecs.clear();
        tvecs.clear();
        const float markerLength = 150; // marker side length in mm
        estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs, rvecs, tvecs);
        for(unsigned int i = 0; i < ids.size(); i++) {
            // Draw an axis triad on every detected marker for visual debugging.
            aruco::drawAxis(image_copy, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 6.0 * markerLength);
        }
    }
    // 'image' is the buffer that is displayed on screen, so convert the
    // annotated copy back to the BGRA layout the display expects.
    cvtColor(image_copy, image, CV_BGR2BGRA);
}
// Fill _objPoints (4x1 CV_32FC3) with the 3D corner coordinates of a square
// marker of side 'markerLength', ordered tl, tr, br, bl to match the corner
// order returned by detectMarkers.
// The coordinate system is centred in the MIDDLE of the marker with Z
// pointing out of the marker plane, matching the reference cv::aruco
// implementation.
// BUG FIX: the previous version placed the origin at the top-left corner,
// contradicting its own comment (and upstream aruco). Note this shifts the
// resulting tvec to the marker centre, so drawn axes now sit in the middle.
void getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints) {
    CV_Assert(markerLength > 0);
    _objPoints.create(4, 1, CV_32FC3);
    Mat objPoints = _objPoints.getMat();
    const float half = markerLength / 2.0f;
    objPoints.ptr< Vec3f >(0)[0] = Vec3f(-half,  half, 0);
    objPoints.ptr< Vec3f >(0)[1] = Vec3f( half,  half, 0);
    objPoints.ptr< Vec3f >(0)[2] = Vec3f( half, -half, 0);
    objPoints.ptr< Vec3f >(0)[3] = Vec3f(-half, -half, 0);
}
// Estimate the pose (rvec/tvec, both CV_64FC3, one entry per marker) of each
// detected marker independently from its 4 image corners.
// _corners: per-marker vectors of 4 Point2f image corners (from detectMarkers).
// markerLength: marker side length; sets the unit/scale of the output tvecs.
void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvecs, OutputArray _tvecs) {
CV_Assert(markerLength > 0);
// The same planar object-point set is reused for every marker.
Mat markerObjPoints;
getSingleMarkerObjectPoints(markerLength, markerObjPoints);
int nMarkers = (int)_corners.total();
_rvecs.create(nMarkers, 1, CV_64FC3);
_tvecs.create(nMarkers, 1, CV_64FC3);
Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
// for each marker, calculate its pose
for (int i = 0; i < nMarkers; i++) {
// NOTE(review): for a small coplanar quad, the default (iterative) solvePnP
// can converge to either of two mirrored solutions — the well-known planar
// pose ambiguity. This is the likely cause of the occasional z-axis flip
// seen in the rvec output above; if your OpenCV version has it, consider
// the SOLVEPNP_IPPE_SQUARE flag (requires centred object points) — verify.
solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs,
rvecs.at<Vec3d>(i), tvecs.at<Vec3d>(i));
}
}
Any help is highly appreciated! Thanks
Chris
2 | No.2 Revision |
Hi,
thanks for trying to help. Picture links are given in my comment below (thanks to 'karma'):
Now, below there are two examples of the rvecs. Interestingly, you can 'see' when it goes wrong by comparing the values of the correct vectors with the wrong one, as shown below... Both examples were taken from the exact same scene, one flipped, one normal.
Normal frame:
rvec[0]: [3.0265, 0.19776, -0.32671]
rvec[1]: [2.9941, 0.20444, -0.39449]
rvec[2]: [3.1457, 0.18338, -0.41779]
rvec[3]: [3.1319, 0.17826, -0.39777]
Another frame, same scene but z-axis flipped:
rvec[0]: [3.0238, 0.19807, -0.32165]
rvec[1]: [2.9897, 0.20444, -0.38867]
rvec[2]: [3.0786, 0.17836, -0.39851]
rvec[3]: [2.9951, 0.17127, 0.79585] // Wrong !!! Negative sign on 3rd element is missing ?
The relevant code follows here. The code is somewhat condensed — I stripped out the unnecessary parts — but there is nothing special, just normal pose estimation.
// Detect the four ArUco markers in the global 'image', estimate their poses,
// draw debug axis triads on a working copy, and write the annotated result
// back into 'image' (the on-screen buffer).
// NOTE(review): relies on file-level globals 'image', 'cameraMatrix',
// 'distCoeffs' and the comparators 'compare_y'/'compare_x1'/'compare_x2'
// declared elsewhere in this file.
void display() {
    std::vector< int > ids;
    std::vector< std::vector< Point2f > > corners, rejected;
    std::vector< Vec3d > rvecs, tvecs;
    Ptr<aruco::Dictionary> dictionary = aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
    Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
    // Use the C++ bool literal instead of the Objective-C 'YES' macro so this
    // also compiles as plain C++ (the field is a bool either way).
    detectorParams->doCornerRefinement = true;
    Mat image_copy;
    // variable 'image' contains the input image.
    // BUG FIX: 'cv:cvtColor' (single colon) declared a goto label named 'cv'
    // and called an unqualified cvtColor — use the scope operator 'cv::'.
    cv::cvtColor(image, image_copy, CV_BGRA2BGR);
    //Mat tmp = image_copy.clone();
    //undistort(tmp, image_copy, cameraMatrix, distCoeffs);
    // detect markers and estimate pose
    detectMarkers(image_copy, dictionary, corners, ids, detectorParams, rejected);
    if(ids.size() == 4) {
        // Bring the 4 markers into a defined order: tl, tr, br, bl.
        std::sort(corners.begin(), corners.end(), compare_y);
        std::sort(corners.begin(), corners.end()-2, compare_x1);
        std::sort(corners.begin()+2, corners.end(), compare_x2);
        // Estimate all the poses at once.
        rvecs.clear();
        tvecs.clear();
        const float markerLength = 150; // marker side length in mm
        estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs, rvecs, tvecs);
        for(unsigned int i = 0; i < ids.size(); i++) {
            // Draw an axis triad on every detected marker for visual debugging.
            aruco::drawAxis(image_copy, cameraMatrix, distCoeffs, rvecs[i], tvecs[i], 6.0 * markerLength);
        }
    }
    // 'image' is the buffer that is displayed on screen, so convert the
    // annotated copy back to the BGRA layout the display expects.
    cvtColor(image_copy, image, CV_BGR2BGRA);
}
// Fill _objPoints (4x1 CV_32FC3) with the 3D corner coordinates of a square
// marker of side 'markerLength', ordered tl, tr, br, bl to match the corner
// order returned by detectMarkers.
// The coordinate system is centred in the MIDDLE of the marker with Z
// pointing out of the marker plane, matching the reference cv::aruco
// implementation.
// BUG FIX: the previous version placed the origin at the top-left corner,
// contradicting its own comment (and upstream aruco). Note this shifts the
// resulting tvec to the marker centre, so drawn axes now sit in the middle.
void getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints) {
    CV_Assert(markerLength > 0);
    _objPoints.create(4, 1, CV_32FC3);
    Mat objPoints = _objPoints.getMat();
    const float half = markerLength / 2.0f;
    objPoints.ptr< Vec3f >(0)[0] = Vec3f(-half,  half, 0);
    objPoints.ptr< Vec3f >(0)[1] = Vec3f( half,  half, 0);
    objPoints.ptr< Vec3f >(0)[2] = Vec3f( half, -half, 0);
    objPoints.ptr< Vec3f >(0)[3] = Vec3f(-half, -half, 0);
}
// Estimate the pose (rvec/tvec, both CV_64FC3, one entry per marker) of each
// detected marker independently from its 4 image corners.
// _corners: per-marker vectors of 4 Point2f image corners (from detectMarkers).
// markerLength: marker side length; sets the unit/scale of the output tvecs.
void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvecs, OutputArray _tvecs) {
CV_Assert(markerLength > 0);
// The same planar object-point set is reused for every marker.
Mat markerObjPoints;
getSingleMarkerObjectPoints(markerLength, markerObjPoints);
int nMarkers = (int)_corners.total();
_rvecs.create(nMarkers, 1, CV_64FC3);
_tvecs.create(nMarkers, 1, CV_64FC3);
Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat();
// for each marker, calculate its pose
for (int i = 0; i < nMarkers; i++) {
// NOTE(review): for a small coplanar quad, the default (iterative) solvePnP
// can converge to either of two mirrored solutions — the well-known planar
// pose ambiguity. This is the likely cause of the occasional z-axis flip
// seen in the rvec output above; if your OpenCV version has it, consider
// the SOLVEPNP_IPPE_SQUARE flag (requires centred object points) — verify.
solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs,
rvecs.at<Vec3d>(i), tvecs.at<Vec3d>(i));
}
}
Any help is highly appreciated! Thanks
Chris