
Revision history

Revision 1 (initial version)

Using solvePnP camera pose - object is offset from detected marker

I have a problem in my iOS application where I attempt to obtain a view matrix using solvePnP and render a 3D cube using modern OpenGL. While my code attempts to render the cube directly on top of the detected marker, it renders with a noticeable offset:

(On the bottom right of the image you can see an OpenCV render of the homography around the tracked marker; the rest of the screen is an OpenGL render of the camera input frame and a 3D cube at location (0,0,0).)

The cube rotates and translates correctly whenever I move the marker, though it is telling that the scale of the translations is off (i.e., if I move my marker 5 cm in the real world, it hardly moves 1 cm on screen).
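
For reference, I understand solvePnP returns tvec in the same units as the object points, and my object points below span exactly 1 unit (i.e. one marker width), so a translation of 1 means one marker width rather than a centimetre. A rough sketch of what metric object points might look like, assuming purely for illustration a printed marker 5 cm wide:

// hypothetical example: object points expressed in metres for an assumed 5 cm wide marker,
// so the tvec returned by solvePnP would come out in metres as well
const float markerWidth = 0.05f; // assumed physical marker width in metres
std::vector<cv::Point3f> metricObjectPoints;
metricObjectPoints.push_back(cv::Point3f(-markerWidth/2, -markerWidth/2, 0));
metricObjectPoints.push_back(cv::Point3f(+markerWidth/2, -markerWidth/2, 0));
metricObjectPoints.push_back(cv::Point3f(+markerWidth/2, +markerWidth/2, 0));
metricObjectPoints.push_back(cv::Point3f(-markerWidth/2, +markerWidth/2, 0));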

These are what I believe to be the relevant parts of the code where the error could come from:

Extracting the view matrix from the homography:

AVCaptureDevice *deviceInput = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceFormat *format = deviceInput.activeFormat;
CMFormatDescriptionRef fDesc = format.formatDescription;
CGSize dim = CMVideoFormatDescriptionGetPresentationDimensions(fDesc, true, true);

const float cx = float(dim.width) / 2.0;
const float cy = float(dim.height) / 2.0;

const float HFOV = format.videoFieldOfView;
const float VFOV = ((HFOV)/cx)*cy;

const float fx = abs(float(dim.width) / (2 * tan(HFOV / 180 * float(M_PI) / 2)));
const float fy = abs(float(dim.height) / (2 * tan(VFOV / 180 * float(M_PI) / 2)));


Mat camIntrinsic = Mat::zeros(3, 3, CV_64F);
camIntrinsic.at<double>(0, 0) = fx;
camIntrinsic.at<double>(0, 2) = cx;
camIntrinsic.at<double>(1, 1) = fy;
camIntrinsic.at<double>(1, 2) = cy;
camIntrinsic.at<double>(2, 2) = 1.0;

std::vector<cv::Point3f> object3dPoints;
object3dPoints.push_back(cv::Point3f(-0.5f,-0.5f,0));
object3dPoints.push_back(cv::Point3f(+0.5f,-0.5f,0));
object3dPoints.push_back(cv::Point3f(+0.5f,+0.5f,0));
object3dPoints.push_back(cv::Point3f(-0.5f,+0.5f,0));


cv::Mat raux,taux;
cv::Mat Rvec, Tvec;
cv::solvePnP(object3dPoints, mNewImageBounds, camIntrinsic, Mat(), raux, taux); //mNewImageBounds are the 4 corners of the homography detected by perspectiveTransform (the green outline seen in the image)
raux.convertTo(Rvec, CV_32F);
taux.convertTo(Tvec, CV_64F);

Mat Rot(3,3,CV_32FC1);
Rodrigues(Rvec, Rot);

// [R | t] matrix
Mat_<double> para = Mat_<double>::eye(4,4);
Rot.convertTo(para(cv::Rect(0,0,3,3)),CV_64F);
Tvec.copyTo(para(cv::Rect(3,0,1,3)));

Mat cvToGl = Mat::zeros(4, 4, CV_64F);
cvToGl.at<double>(0, 0) = 1.0f;
cvToGl.at<double>(1, 1) = -1.0f; // Invert the y axis
cvToGl.at<double>(2, 2) = -1.0f; // invert the z axis
cvToGl.at<double>(3, 3) = 1.0f;

para = cvToGl * para;

Mat_<double> modelview_matrix;
Mat(para.t()).copyTo(modelview_matrix); // transpose to col-major for OpenGL

for(int col = 0; col < modelview_matrix.cols; col++)
{
    for(int row = 0; row < modelview_matrix.rows; row++)
    {
        openGLViewMatrix[col][row] = modelview_matrix.at<double>(col,row);
    }
}

I made sure the camera intrinsic matrix contains correct values, and I believe the portion which converts the OpenCV Mat to an OpenGL view matrix to be correct, as the cube translates and rotates in the right directions.
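
A sanity check I could add here (a rough sketch, assuming mNewImageBounds is a std::vector<cv::Point2f> stored in the same corner order as object3dPoints) is to reproject the object points with the pose returned by solvePnP and compare against the detected corners:

// reproject the marker corners with the estimated pose and print the pixel error
// against the detected corners; a large error would point at the intrinsics or the pose
std::vector<cv::Point2f> reprojected;
cv::projectPoints(object3dPoints, raux, taux, camIntrinsic, cv::Mat(), reprojected);
for (size_t i = 0; i < reprojected.size(); i++)
{
    float dx = reprojected[i].x - mNewImageBounds[i].x;
    float dy = reprojected[i].y - mNewImageBounds[i].y;
    printf("corner %zu reprojection error: %f px\n", i, sqrt(dx * dx + dy * dy));
}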

Once the view matrix is calculated, I use it to draw the cube as follows:

_projectionMatrix = glm::perspective<float>(radians(60.0f), fabs(view.bounds.size.width / view.bounds.size.height), 0.1f, 100.0f);
_cube_ModelMatrix = glm::translate(glm::vec3(0,0,0));
const mat4 MVP = _projectionMatrix * openGLViewMatrix * _cube_ModelMatrix;
glUniformMatrix4fv(glGetUniformLocation(_cube_program, "ModelMatrix"), 1, GL_FALSE, value_ptr(MVP));

glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, BUFFER_OFFSET(0));

Is anyone able to spot my error?


Latest revision

Using solvePnP camera pose - object is offset from detected marker

I have a problem in my iOS application where I attempt to obtain a view matrix using solvePnP and render a 3D cube using modern OpenGL. While my code attempts to render the cube directly on top of the detected marker, it renders with a noticeable offset:

EDIT: It appears I am unable to attach images to my post due to my karma; links are in the first comment.

(On the bottom right of the image you can see an OpenCV render of the homography around the tracked marker and the 3 axes drawn using rvec and tvec; the rest of the screen is an OpenGL render of the camera input frame and a 3D cube at location (0,0,0).)

The cube rotates and translates correctly whenever I move the real-world marker; however, it does not "sit" on top of the marker and does not seem to keep a constant world position (see the video in the comments).

These are what I believe to be the relevant parts of the code where the error could come from:

Extracting the view matrix from the homography:

AVCaptureDevice *deviceInput = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceFormat *format = deviceInput.activeFormat;
CMFormatDescriptionRef fDesc = format.formatDescription;
CGSize dim = CMVideoFormatDescriptionGetPresentationDimensions(fDesc, true, true);

static const float cx = float(dim.width) / 2.0;
static const float cy = float(dim.height) / 2.0;

static const float HFOV = format.videoFieldOfView;
static const float VFOV = ((HFOV)/cx)*cy;

static const float fx = abs(float(dim.width) / (2 * tan(HFOV / 180 * float(M_PI) / 2)));
static const float fy = abs(float(dim.height) / (2 * tan(VFOV / 180 * float(M_PI) / 2)));


static const Mat camIntrinsic = (Mat_<double>(3,3) <<
                    fx, 0, cx,
                    0,  fy, cy,
                    0,  0, 1);

static const float objHalfSize = 0.5f;
Mat objPoints;
objPoints.create(4, 1, CV_32FC3);

objPoints.ptr< Vec3f >(0)[0] = Vec3f(-objHalfSize, +objHalfSize, 0);
objPoints.ptr< Vec3f >(0)[1] = Vec3f(+objHalfSize, +objHalfSize, 0);
objPoints.ptr< Vec3f >(0)[2] = Vec3f(+objHalfSize, -objHalfSize, 0);
objPoints.ptr< Vec3f >(0)[3] = Vec3f(-objHalfSize, -objHalfSize, 0);

cv::Mat raux,taux;
cv::Mat Rvec, Tvec;
cv::Mat distCoeffs = Mat::zeros(5, 1, CV_64F);
cv::solvePnP(objPoints, mNewImageBounds, camIntrinsic, distCoeffs, raux, taux); //mNewImageBounds are the 4 corners of the homography detected by perspectiveTransform (the green outline seen in the image)
raux.convertTo(Rvec, CV_64F);
taux.convertTo(Tvec, CV_64F);

DrawingUtility::drawAxis(image, camIntrinsic, Rvec, Tvec, 0.5f); //debug draw axes

cv::Mat rotation;
cv::Rodrigues(Rvec, rotation);

//compose a view matrix from camera extrinsic rotation and translation
cv::Mat viewMatrix = cv::Mat::zeros(4, 4, CV_64FC1);
for(unsigned int row=0; row<3; ++row)
{
    for(unsigned int col=0; col<3; ++col)
    {
        viewMatrix.at<double>(row, col) = rotation.at<double>(row, col);
    }

    viewMatrix.at<double>(row, 3) = Tvec.at<double>(row, 0);
}

viewMatrix.at<double>(3, 3) = 1.0f;

cv::Mat cvToGl = cv::Mat::zeros(4, 4, CV_64FC1);
cvToGl.at<double>(0, 0) = 1.0f;
cvToGl.at<double>(1, 1) = -1.0f; // Invert the y axis
cvToGl.at<double>(2, 2) = -1.0f; // invert the z axis
cvToGl.at<double>(3, 3) = 1.0f;

viewMatrix = cvToGl * viewMatrix;

glm::mat4 V; //OpenGL view matrix
glm::mat4 P; //OpenGL projection matrix
for(int col = 0; col < viewMatrix.cols; col++)
{
    for(int row = 0; row < viewMatrix.rows; row++)
    {
        V[row][col] = viewMatrix.at<double>(col,row); //rows and cols are swapped to transpose viewMatrix
    }
}

P = glm::perspective<float>(glm::radians(60.0f), float(1280.0f / 720.0f), 0.05f, 500);
openGLViewProjectionwMatrix = P * V;
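
For what it's worth, I believe the element-by-element copy above could also be written with glm::make_mat4 (just a sketch, assuming <glm/gtc/type_ptr.hpp> is included); transposing the row-major cv::Mat first should leave the data in the column-major order glm expects:

// transpose to column-major, convert to float, then hand the raw buffer to glm
cv::Mat viewMatrixGL;
cv::Mat(viewMatrix.t()).convertTo(viewMatrixGL, CV_32F);
glm::mat4 V2 = glm::make_mat4(reinterpret_cast<const float*>(viewMatrixGL.data));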

I made sure the camera intrinsic matrix contains correct values, and I believe the portion which converts the OpenCV Mat to an OpenGL view matrix to be correct, as the cube translates and rotates in the right directions.
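
One extra thing I could print to double-check that (a small sketch, assuming dim and fy from above are still in scope) is the vertical field of view implied by fy, to compare against the 60 degrees I pass to glm::perspective:

// vertical FOV implied by the fy actually given to solvePnP,
// from the pinhole relation fy = height / (2 * tan(VFOV / 2))
const float impliedVFOV = 2.0f * atan(float(dim.height) / (2.0f * fy)) * 180.0f / float(M_PI);
printf("VFOV implied by fy: %f degrees\n", impliedVFOV);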

Once the view matrix is calculated, I use it to draw the cube as follows:

const mat4 M = glm::mat4(); //just identity as i want a cube laying at world 0,0,0, with its original scale and no rotation
const mat4 MVP = openGLViewProjectionwMatrix * M;
glUniformMatrix4fv(glGetUniformLocation(_cube_program, "MVP"), 1, GL_FALSE, value_ptr(MVP));

glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, BUFFER_OFFSET(0));

In the shader:

gl_Position =  MVP * vec4(in_position, 1.0);

Is anyone able to spot my error?