
jys923's profile - activity

2016-09-23 02:04:42 -0600 received badge  Enthusiast
2016-09-19 00:43:43 -0600 received badge  Editor (source)
2016-09-19 00:43:04 -0600 asked a question OpenCV + OpenGL: How to use SolvePnP Rotation and translation vector in OpenGL

I'm working on an AR program.

Please help me; it is almost done.

I found the marker and obtained rvec and tvec from solvePnP.

However, I cannot draw the object in OpenGL.

I am a beginner with both OpenGL and OpenCV.

I want to render a 3D cube over that marker using OpenGL.

I built OpenCV with OpenGL support and use its OpenGL window mode:

int main(int argc, char* argv[])
{
    ...
    namedWindow(winname, CV_WINDOW_OPENGL);
    setOpenGlDrawCallback(winname.c_str(), on_opengl);
    ...
    // find the marker, then estimate its pose with solvePnP

According to "http://answers.opencv.org/question/23089/opencv-opengl-proper-camera-pose-using-solvepnp/" I'm doing it like this:

    Mat rotation;
    Mat viewMatrix = cv::Mat::zeros(4, 4, CV_64FC1);
    cv::Rodrigues(rvec, rotation);  // rvec -> 3x3 rotation matrix

    for (unsigned int row = 0; row < 3; ++row)
    {
        for (unsigned int col = 0; col < 3; ++col)
        {
            viewMatrix.at<double>(row, col) = rotation.at<double>(row, col);
        }
        viewMatrix.at<double>(row, 3) = tvec.at<double>(row, 0);
    }
    viewMatrix.at<double>(3, 3) = 1.0;

    // OpenGL expects column-major order, so transpose
    cv::Mat glViewMatrix = cv::Mat::zeros(4, 4, CV_64F);
    cv::transpose(viewMatrix, glViewMatrix);
    glViewMatrix2 = glViewMatrix.clone();
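One detail that is easy to miss when adapting that thread: OpenCV's camera frame has x right, y down and z forward, while OpenGL's eye space has y up and the camera looking down -z. Below is a minimal sketch of that axis flip, applied to viewMatrix just before the transpose (an illustration of the usual conversion, not code from the post above):

    // Convert from OpenCV's camera convention (x right, y down, z forward)
    // to OpenGL's (x right, y up, camera looking down -z) by flipping y and z.
    cv::Mat cvToGl = cv::Mat::zeros(4, 4, CV_64F);
    cvToGl.at<double>(0, 0) =  1.0;
    cvToGl.at<double>(1, 1) = -1.0;  // invert y
    cvToGl.at<double>(2, 2) = -1.0;  // invert z
    cvToGl.at<double>(3, 3) =  1.0;
    viewMatrix = cvToGl * viewMatrix;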

The OpenGL draw callback:

void on_opengl(void* param)
{
    glClearColor(1.0f, 1.0f, 1.0f, 1.0f);  // set the clear color before clearing
    glClear(GL_COLOR_BUFFER_BIT);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glLoadMatrixd(&glViewMatrix2.at<double>(0, 0));  // column-major view matrix built from rvec/tvec
    //glTranslated(glViewMatrix2.at<double>(0, 3), -glViewMatrix2.at<double>(1, 3), 0);
    //glTranslated(glViewMatrix2.at<double>(3, 0), -glViewMatrix2.at<double>(3, 1), -glViewMatrix2.at<double>(3, 2));
    glTranslated(0.0, 0.0, 0.0);
    static const int coords[6][4][3] = {
        { { +1, -1, -1 },{ -1, -1, -1 },{ -1, +1, -1 },{ +1, +1, -1 } },
        { { +1, +1, -1 },{ -1, +1, -1 },{ -1, +1, +1 },{ +1, +1, +1 } },
        { { +1, -1, +1 },{ +1, -1, -1 },{ +1, +1, -1 },{ +1, +1, +1 } },
        { { -1, -1, -1 },{ -1, -1, +1 },{ -1, +1, +1 },{ -1, +1, -1 } },
        { { +1, -1, +1 },{ -1, -1, +1 },{ -1, -1, -1 },{ +1, -1, -1 } },
        { { -1, -1, +1 },{ +1, -1, +1 },{ +1, +1, +1 },{ -1, +1, +1 } }
    };

    for (int i = 0; i < 6; ++i) {
        glColor3ub(i * 20, 100 + i * 10, i * 42);
        glBegin(GL_QUADS);
        for (int j = 0; j < 4; ++j) {
            glVertex3d(0.2 * coords[i][j][0], 0.2 * coords[i][j][1], 0.2 * coords[i][j][2]);
        }
        glEnd();
    }
}

I cannot see the cube.

If I delete the line "glLoadMatrixd(&glViewMatrix2.at<double>(0, 0));", I can see the cube at the center of the window.
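That behaviour is consistent with GL_PROJECTION being left at identity: the default clip volume runs from -1 to 1 on every axis, so the 0.2-sized cube at the origin shows up, but once the solvePnP pose is loaded the cube sits at the marker's distance from the camera, which usually falls outside that volume and is clipped. A minimal sketch of a perspective projection built from the camera calibration (fy, imageWidth and imageHeight are placeholder names for the calibrated focal length and image size, and GLU must be linked for gluPerspective):

    // Set a perspective projection that matches the calibrated camera.
    // fovy = 2 * atan(h / (2 * fy)); the aspect ratio assumes fx is close to fy.
    double fovyDeg = 2.0 * atan(imageHeight / (2.0 * fy)) * 180.0 / CV_PI;
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(fovyDeg, (double)imageWidth / imageHeight, 0.1, 100.0);
    glMatrixMode(GL_MODELVIEW);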

Q2 - How do I use GLUT? Where should I call "glutInit(&argc2, argv2);"?
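Regarding Q2: GLUT is not required for a window created with CV_WINDOW_OPENGL, since OpenCV manages the GL context and the draw callback itself. If plain GLUT is used instead, glutInit must be the first GLUT call, normally at the top of main() with the real argc/argv. A minimal, self-contained sketch (illustrative only, separate from the code above):

    #include <GL/glut.h>

    static void display()
    {
        glClear(GL_COLOR_BUFFER_BIT);
        // ... draw the cube here ...
        glutSwapBuffers();
    }

    int main(int argc, char* argv[])
    {
        glutInit(&argc, argv);                        // must come before any other GLUT call
        glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
        glutInitWindowSize(640, 480);
        glutCreateWindow("cube");
        glutDisplayFunc(display);
        glutMainLoop();                               // never returns
        return 0;
    }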

2016-07-12 09:11:06 -0600 asked a question find trapezoid's 4 points for warping (rectangle - A4 paper)

I find the biggest rectangle, but I need the trapezoid's 4 corner points. I tried using "Rect", but that only gives 2 points, and only 1 of them is a good point. Please help me.

#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {

//Mat src = imread(argv[1]);
//Mat src = imread("pic1.png");
Mat src = imread("IMG_20160708_1338252.jpg");
imshow("source", src);
double largest_area = 0;
int largest_contour_index = 0;
Rect bounding_rect;

Mat thr;
cvtColor(src, thr, COLOR_BGR2GRAY); //Convert to gray
threshold(thr, thr, 125, 255, THRESH_BINARY); //Threshold the gray

vector<vector<Point> > contours; // Vector for storing contours

findContours(thr, contours, RETR_CCOMP, CHAIN_APPROX_SIMPLE); // Find the contours in the image

for (size_t i = 0; i < contours.size(); i++) // iterate through each contour.
{
    double area = contourArea(contours[i]);  //  Find the area of contour
    //exclude the contour if its area equals the area of the whole image
    //if (src.size().area() != area) {
        if (area > largest_area)
        {
            largest_area = area;
            largest_contour_index = i;               //Store the index of largest contour
            bounding_rect = boundingRect(contours[i]); // Find the bounding rectangle for biggest contour
        }
    //}
}

printf("top%d,%d\n", bounding_rect.tl().x, bounding_rect.tl().y);
printf("bottom%d,%d\n\n", bounding_rect.br().x, bounding_rect.br().y);
printf("top%d,%d\n", bounding_rect.x, bounding_rect.y);
printf("%d,%d\n", bounding_rect.x, bounding_rect.y+ bounding_rect.height);
printf("bottom%d,%d\n", bounding_rect.x + bounding_rect.width, bounding_rect.y);
printf("%d,%d\n\n", bounding_rect.x+ bounding_rect.width, bounding_rect.y+ bounding_rect.height);

//printf("bottom%d,%d\n", bounding_rect.br().x, bounding_rect.br().y);
//contours[largest_contour_index]; // the largest rectangle
//contours[largest_contour_index][0].x; contours[largest_contour_index][0].y;
printf("1-%d,", contours[largest_contour_index][0].x);
printf("1-%d\n", contours[largest_contour_index][0].y);
printf("2-%d,", contours[largest_contour_index][1].x);
printf("2-%d\n", contours[largest_contour_index][1].y);
printf("3-%d,", contours[largest_contour_index][2].x);
printf("3-%d\n", contours[largest_contour_index][2].y);
printf("4-%d,", contours[largest_contour_index][3].x);
printf("4-%d\n", contours[largest_contour_index][3].y);

//printf("1%d,", contours[0][largest_contour_index].x);

drawContours(src, contours, largest_contour_index, Scalar(0, 255, 0), 2); // Draw the largest contour using previously stored index.

imshow("result", src);
waitKey();
return 0;

}
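A boundingRect is axis-aligned, so at most a couple of its corners can coincide with a tilted sheet of paper, which is consistent with the symptom above. One common way to get the trapezoid's own four corners is to approximate the largest contour with approxPolyDP and then warp with getPerspectiveTransform. A minimal sketch reusing the variables from the code above (the destination size and the corner order are assumptions and may need adjusting):

    // Approximate the largest contour by a polygon; for a sheet of paper this
    // usually gives exactly 4 corners.
    vector<Point> quad;
    double peri = arcLength(contours[largest_contour_index], true);
    approxPolyDP(contours[largest_contour_index], quad, 0.02 * peri, true);

    if (quad.size() == 4)
    {
        for (int k = 0; k < 4; k++)
            printf("corner %d: %d,%d\n", k, quad[k].x, quad[k].y);

        // Warp to an A4-proportioned rectangle. NOTE: the corner order returned
        // by approxPolyDP depends on the contour, so dstPts may need reordering.
        vector<Point2f> srcPts(quad.begin(), quad.end());
        vector<Point2f> dstPts = { Point2f(0, 0), Point2f(0, 842), Point2f(595, 842), Point2f(595, 0) };
        Mat M = getPerspectiveTransform(srcPts, dstPts);
        Mat warped;
        warpPerspective(src, warped, M, Size(595, 842));
        imshow("warped", warped);
    }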

2016-07-09 11:31:27 -0600 asked a question find largest Square and location

I used the sample code:

#ifdef _CH_
#pragma package <opencv>
#endif

#include <opencv2\opencv.hpp>
#include <opencv2\highgui.hpp>
#include <stdio.h>
#include <math.h>
#include <string.h>

int thresh = 50;
IplImage* img = 0;
IplImage* img0 = 0;
CvMemStorage* storage = 0;
const char* wndname = "Square Detection Demo";

// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle(CvPoint* pt1, CvPoint* pt2, CvPoint* pt0)
{
    double dx1 = pt1->x - pt0->x;
    double dy1 = pt1->y - pt0->y;
    double dx2 = pt2->x - pt0->x;
    double dy2 = pt2->y - pt0->y;
    return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4(IplImage* img, CvMemStorage* storage)
{
    CvSeq* contours;
    int i, c, l, N = 11;
    CvSize sz = cvSize(img->width & -2, img->height & -2);
    IplImage* timg = cvCloneImage(img); // make a copy of input image
    IplImage* gray = cvCreateImage(sz, 8, 1);
    IplImage* pyr = cvCreateImage(cvSize(sz.width / 2, sz.height / 2), 8, 3);
    IplImage* tgray;
    CvSeq* result;
    double s, t;
    // create empty sequence that will contain points -
    // 4 points per square (the square's vertices)
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

// select the maximum ROI in the image 
// with the width and height divisible by 2 
cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));

// down-scale and upscale the image to filter out the noise 
cvPyrDown(timg, pyr, 7);
cvPyrUp(pyr, timg, 7);
tgray = cvCreateImage(sz, 8, 1);

// find squares in every color plane of the image 
for (c = 0; c < 3; c++)
{
    // extract the c-th color plane 
    cvSetImageCOI(timg, c + 1);
    cvCopy(timg, tgray, 0);

    // try several threshold levels 
    for (l = 0; l < N; l++)
    {
        // hack: use Canny instead of zero threshold level. 
        // Canny helps to catch squares with gradient shading 
        if (l == 0)
        {
            // apply Canny. Take the upper threshold from slider 
            // and set the lower to 0 (which forces edges merging) 
            cvCanny(tgray, gray, 0, thresh, 5);
            // dilate canny output to remove potential 
            // holes between edge segments 
            cvDilate(gray, gray, 0, 1);
        }
        else
        {
            // apply threshold if l!=0: 
            // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0 
            cvThreshold(tgray, gray, (l + 1) * 255 / N, 255, CV_THRESH_BINARY);
        }

        // find contours and store them all as a list 
        cvFindContours(gray, storage, &contours, sizeof(CvContour),
            CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

        // test each contour 
        while (contours)
        {
            // approximate contour with accuracy proportional 
            // to the contour perimeter 
            result = cvApproxPoly(contours, sizeof(CvContour), storage,
                CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0);
            // square contours should have 4 vertices after approximation 
            // relatively large area (to filter out noisy contours) 
            // and be convex. 
            // Note: absolute value of an area is used because 
            // area may be positive or negative - in accordance with the 
            // contour orientation 
            if (result->total == 4 &&
                fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 1000 &&
                cvCheckContourConvexity(result))
            {
                s = 0;

                for (i = 0; i < 5; i++)
                {
                    //// find minimum angle between joint 
                    //// edges (maximum of cosine) 
                    //if (i >= 2)
                    //{
                    //  t = fabs(angle(
                    //      (CvPoint*)cvGetSeqElem(result ...
(more)
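The sample above only collects candidate squares; to get the largest square and its location, the returned sequence still has to be scanned for the quad with the biggest area. A minimal sketch in the same old C API (illustrative only; img and storage are the sample's existing variables, and each detected square is stored as 4 consecutive CvPoint entries, as the sample's comment says):

    // Scan the squares returned by findSquares4() and keep the biggest one.
    CvSeq* squares = findSquares4(img, storage);
    double bestArea = 0;
    CvPoint best[4] = {};
    for (int k = 0; k + 3 < squares->total; k += 4)
    {
        CvPoint pt[4];
        for (int j = 0; j < 4; j++)
            pt[j] = *(CvPoint*)cvGetSeqElem(squares, k + j);

        // shoelace formula for the area of the quad
        double area = 0.5 * fabs(
            (double)(pt[0].x * pt[1].y - pt[1].x * pt[0].y +
                     pt[1].x * pt[2].y - pt[2].x * pt[1].y +
                     pt[2].x * pt[3].y - pt[3].x * pt[2].y +
                     pt[3].x * pt[0].y - pt[0].x * pt[3].y));
        if (area > bestArea)
        {
            bestArea = area;
            memcpy(best, pt, sizeof(best));
        }
    }
    if (bestArea > 0)
        printf("largest square: area %.0f, corners (%d,%d) (%d,%d) (%d,%d) (%d,%d)\n",
            bestArea, best[0].x, best[0].y, best[1].x, best[1].y,
            best[2].x, best[2].y, best[3].x, best[3].y);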