
What is wrong with the code below? It crashes on the cv::projectPoints line in release mode; in debug mode it does not crash, but the cv::projectPoints call does not work.

#include <opencv2/opencv.hpp>
#include <dlib/image_processing.h>
#include <dlib/gui_widgets.h>
#include <dlib/image_io.h>
#include <dlib/dir_nav.h>
#include <dlib/opencv.h>
#include <iostream>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing/render_face_detections.h>
#include <dlib/image_processing.h>

using namespace dlib;
using namespace std;
using namespace cv;

Scalar red(0, 0, 255);
Scalar green(0, 255, 0);
Scalar blue(255, 0, 0);
Scalar yellow(0, 255, 255);

double FACE_DOWNSAMPLE_RATIO = 2.0;
int SKIP_FRAMES = 2;

void nokta_koy(const int bas, const int bit, full_object_detection shape, std::vector<cv::Point2d>* par);

int main(int argc, char** argv)
{
cv::Mat frame;
cv::VideoCapture cap;
cap.open("video/Woman03.mp4");
// float is 32 bit, double is 64 bit
std::vector<cv::Point2f> kpoints_2d;
kpoints_2d.push_back(cv::Point2f(40.00f, 31.57863f));
std::vector<cv::Point3f> kpoints_3d;
kpoints_3d.push_back(cv::Point3f(40.00f, 31.57863f, 20.00f));

std::vector<cv::Point2d> points_2d;
std::vector<cv::Point3d> points_3d;
// Left eyebrow, landmarks 17 - 21
points_3d.push_back(cv::Point3d(-22.33010, 31.18483, 14.48982));
points_3d.push_back(cv::Point3d(-21.13250, 30.87293, 14.94626));
points_3d.push_back(cv::Point3d(-19.58109, 30.82237, 15.09587));
points_3d.push_back(cv::Point3d(-18.50974, 31.03186, 14.68483));
points_3d.push_back(cv::Point3d(-17.17672, 31.55783, 14.03172));
// Right eyebrow, landmarks 22 - 26
points_3d.push_back(cv::Point3d(-12.24395, 33.62098, 14.00198));
points_3d.push_back(cv::Point3d(-10.66202, 34.28308, 14.78618));
points_3d.push_back(cv::Point3d(-9.45469, 35.27701, 15.08884));
points_3d.push_back(cv::Point3d(-8.72852, 36.25310, 15.00331));
points_3d.push_back(cv::Point3d(-8.10672, 37.56454, 14.63086));
// Nose bridge, landmarks 27 - 30
points_3d.push_back(cv::Point3d(-14.73220, 32.31916, 12.17609));
points_3d.push_back(cv::Point3d(-14.39165, 31.89311, 9.46658));
points_3d.push_back(cv::Point3d(-13.64990, 30.19387, 6.50432));
points_3d.push_back(cv::Point3d(-13.63577, 29.96962, 5.79795));
// Lower nose, landmarks 31 - 35
points_3d.push_back(cv::Point3d(-16.76307, 31.30655, 4.26300));
points_3d.push_back(cv::Point3d(-15.79540, 31.64108, 4.09090));
points_3d.push_back(cv::Point3d(-14.49324, 31.98343, 3.51114));
points_3d.push_back(cv::Point3d(-13.32834, 32.65988, 3.99250));
points_3d.push_back(cv::Point3d(-12.51400, 33.40477, 4.02762));
// Left eye, landmarks 36 - 41
points_3d.push_back(cv::Point3d(-22.88804, 32.08460, 11.35419));
points_3d.push_back(cv::Point3d(-21.30383, 31.52723, 11.85955));
points_3d.push_back(cv::Point3d(-19.53373, 31.91928, 12.00906));
points_3d.push_back(cv::Point3d(-18.71059, 32.92976, 11.02460));
points_3d.push_back(cv::Point3d(-19.47160, 32.26871, 10.42905));
points_3d.push_back(cv::Point3d(-21.24849, 31.57863, 10.44351));
// Right eye, landmarks 42 - 47
points_3d.push_back(cv::Point3d(-12.63167, 35.37018, 10.82787));
points_3d.push_back(cv::Point3d(-10.99759, 35.54239, 11.70073));
points_3d.push_back(cv::Point3d(-9.34893, 36.14645, 12.01417));
points_3d.push_back(cv::Point3d(-8.69870, 37.68277, 11.43927));
points_3d.push_back(cv::Point3d(-9.65058, 36.23913, 10.72708));
points_3d.push_back(cv::Point3d(-11.07719, 35.74884, 10.58645));

cap >> frame;
cv::resize(frame, frame, cv::Size(), 1.0 / FACE_DOWNSAMPLE_RATIO, 1.0 / FACE_DOWNSAMPLE_RATIO);
double focal_length;
cv::Point2d merkez;
cv::Mat camera_matrix = cv::Mat::zeros(3, 3, CV_64F);
cv::Mat dist_coeffs = cv::Mat::zeros(4, 1, CV_64F);    // vector of distortion coefficients
cv::Mat rotation_vector = cv::Mat::zeros(3, 1, CV_64F);          // output rotation vector
cv::Mat translation_vector = cv::Mat::zeros(3, 1, CV_64F);
focal_length = frame.cols; // Approximate focal length.
merkez = cv::Point2d(frame.cols / 2, frame.rows / 2);

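// Intrinsic camera matrix: focal length approximated by the image width,
// principal point at the image center, zero skew.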
camera_matrix.at<double>(0, 0) = focal_length;
camera_matrix.at<double>(0, 1) = 0;
camera_matrix.at<double>(0, 2) = merkez.x;
camera_matrix.at<double>(1, 0) = 0;
camera_matrix.at<double>(1, 1) = focal_length;
camera_matrix.at<double>(1, 2) = merkez.y;
camera_matrix.at<double>(2, 0) = 0;
camera_matrix.at<double>(2, 1) = 0;
camera_matrix.at<double>(2, 2) = 1;
frontal_face_detector detector = get_frontal_face_detector();
shape_predictor sp;
deserialize("shape_predictor_68_face_landmarks.dat/shape_predictor_68_face_landmarks.dat") >> sp;
image_window win;

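// Main loop: detect the face, fit the 68 landmarks, estimate the head pose, and display the result.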
for (;;)
    {
        dlib::cv_image<bgr_pixel> img(frame);
        std::vector<dlib::rectangle> dets = detector(img);
        std::vector<full_object_detection> shapes;
        for (unsigned long j = 0; j < dets.size(); ++j)
        {
            full_object_detection shape = sp(img, dets[j]);
            shapes.push_back(shape);
            int bas, bit = 0;
            bas = 17;// Left eyebrow
            bit = 21;
            nokta_koy(bas, bit, shape, &points_2d);
            bas = 22;// Right eyebrow
            bit = 26;
            nokta_koy(bas, bit, shape, &points_2d);
            bas = 27;// Nose bridge
            bit = 30;
            nokta_koy(bas, bit, shape, &points_2d);
            bas = 31;// Lower nose
            bit = 35;
            nokta_koy(bas, bit, shape, &points_2d);
            bas = 36;// Left eye
            bit = 41;
            nokta_koy(bas, bit, shape, &points_2d);
            bas = 42;// Right eye
            bit = 47;
            nokta_koy(bas, bit, shape, &points_2d);
        }

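        // Estimate the head pose (rotation and translation vectors) from the 3D model points and the detected 2D landmarks.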
        cv::solvePnP(points_3d, points_2d, camera_matrix, dist_coeffs, rotation_vector, translation_vector, CV_ITERATIVE);

        cout << camera_matrix << endl;
        cout << dist_coeffs << endl;
        cout << rotation_vector << endl;
        cout << translation_vector << endl;

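        // Project the 3D key point into image coordinates using the estimated pose; the result overwrites kpoints_2d.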
        cv::projectPoints(kpoints_3d, rotation_vector, translation_vector, camera_matrix, dist_coeffs, kpoints_2d);

        win.clear_overlay();
        win.set_image(img);
        win.add_overlay(render_face_detections(shapes));
        points_2d.clear();
        cap >> frame;
        cv::resize(frame, frame, cv::Size(), 1.0 / FACE_DOWNSAMPLE_RATIO, 1.0 / FACE_DOWNSAMPLE_RATIO);
}

}

void nokta_koy(const int bas, const int bit, full_object_detection shape, std::vector<cv::Point2d>* par)
{
    for (int i = bas; i <= bit; ++i)
    {
        par->push_back(cv::Point2d(shape.part(i).x(), shape.part(i).y()));
    }
    return;
}
