OpenCV Camshift's tracking window going out of the bounds while tracking a face
#include <opencv2/opencv.hpp>
#include <dlib/opencv.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>
#include <string>
#include <vector>

void camshift_face(cv::VideoCapture& video)
{
    //Dlib's front face detector
    dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();

    std::string predictor_path("shape_predictor_68_face_landmarks.dat");
    dlib::shape_predictor predictor; //Dlib's facial landmark predictor
    dlib::deserialize(predictor_path) >> predictor;

    //Vector of faces
    std::vector<dlib::rectangle> dets;
    dlib::full_object_detection shape;

    cv::Mat previous_frame, current_frame;
    video >> previous_frame;
    video >> current_frame;

    //Mats reused by the CamShift pipeline
    cv::Mat hsv;
    cv::Mat hsv_roi;
    cv::Mat mask;
    cv::Mat hist_roi;
    cv::Mat roi;
    cv::Mat backproj;

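    //CamShift stops after MAX_ITERATIONS iterations or once the window shifts by less than 1 pixel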
    const int MAX_ITERATIONS = 10;
    cv::TermCriteria term_criteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, MAX_ITERATIONS, 1);

    cv::Rect tracking_window;
    cv::RotatedRect track_box;

    std::vector<int> channels{ 0 };
    std::vector<int> hist_size{ 180 };
    std::vector<float> ranges{ 0.0f, 180.0f };

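    //HSV bounds used to mask out low-saturation / low-brightness pixels before building the hue histogram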
    cv::Scalar lowerb(0.0, 60.0, 32.0);
    cv::Scalar upperb(180.0, 255.0, 255.0);

    while (video.isOpened() && !current_frame.empty() && cv::waitKey(1) != 'q')
    {
        //Only start searching for a face when there is a scene change (is_marker is my own scene-change check, defined elsewhere)
        if (is_marker(previous_frame, current_frame, 1.0))
        {
            dlib::cv_image<dlib::bgr_pixel> img(previous_frame);
            dets = detector(img);

            //Start tracking the first detected face (dets[0])
            if (dets.size() > 0)
            {
                tracking_window.x = dets[0].left();
                tracking_window.y = dets[0].top();
                tracking_window.width = dets[0].right() - dets[0].left();
                tracking_window.height = dets[0].bottom() - dets[0].top();

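                //Build a hue histogram of the detected face and use it as the model for back-projection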
                roi = cv::Mat(previous_frame, tracking_window);
                cv::cvtColor(roi, hsv_roi, cv::COLOR_BGR2HSV);
                cv::inRange(hsv_roi, lowerb, upperb, mask);
                cv::calcHist(std::vector<cv::Mat>{hsv_roi}, channels, mask, hist_roi, hist_size, ranges);
                cv::normalize(hist_roi, hist_roi, 0, 255, cv::NORM_MINMAX); //Scale the histogram so the back-projection spans 0..255
                cv::rectangle(previous_frame, tracking_window, cv::Scalar(255, 255, 0), 2);
                while (true)
                {
                    previous_frame = current_frame.clone(); //Deep copy; plain assignment only copies the Mat header
                    video >> current_frame;
                    if (current_frame.empty())
                        break;

                    cv::cvtColor(previous_frame, hsv, cv::COLOR_BGR2HSV);
                    cv::calcBackProject(std::vector<cv::Mat>{hsv}, channels, hist_roi, backproj, ranges, 1.0);
                    track_box = cv::CamShift(backproj, tracking_window, term_criteria);
                    cv::rectangle(previous_frame, track_box.boundingRect(), cv::Scalar(255, 255, 0), 2);

                    cv::imshow("Window", previous_frame);
                    if (cv::waitKey(0) == 'q')
                        break; //Otherwise this loop can never end
                }
            }
        }
        previous_frame = current_frame.clone(); //Deep copy here as well, so is_marker compares two different frames
        video >> current_frame;
    }
}

What I am basically doing is using dlib's frontal face detector to find a face (only when there is a scene change) and then tracking that face with OpenCV's CamShift. The problem is that the tracking window drifts off the face and eventually starts tracking someone else in the frame. Are the parameters in my code (histogram size, ranges, the inRange bounds, the termination criteria) correct? I am a complete beginner in computer vision, so any guidance would be appreciated :)
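In case it helps to isolate the problem, here is a stripped-down sketch of just the histogram / back-projection / CamShift part, with a hard-coded initial window instead of the dlib detection (the video path and the window coordinates below are placeholders, not values from my actual setup):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::VideoCapture video("input.mp4"); //placeholder path
    if (!video.isOpened())
        return 1;

    cv::Mat frame;
    video >> frame;
    if (frame.empty())
        return 1;

    //Hard-coded initial window standing in for the dlib face detection
    cv::Rect tracking_window(100, 100, 80, 80); //placeholder coordinates

    //Hue histogram of the initial window, masked to reasonably saturated/bright pixels
    cv::Mat hsv_roi, mask, hist_roi;
    cv::cvtColor(frame(tracking_window), hsv_roi, cv::COLOR_BGR2HSV);
    cv::inRange(hsv_roi, cv::Scalar(0, 60, 32), cv::Scalar(180, 255, 255), mask);

    std::vector<int> channels{ 0 };
    std::vector<int> hist_size{ 180 };
    std::vector<float> ranges{ 0.0f, 180.0f };
    cv::calcHist(std::vector<cv::Mat>{hsv_roi}, channels, mask, hist_roi, hist_size, ranges);
    cv::normalize(hist_roi, hist_roi, 0, 255, cv::NORM_MINMAX);

    cv::TermCriteria term_criteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1);

    while (true)
    {
        video >> frame;
        if (frame.empty() || cv::waitKey(30) == 'q')
            break;

        //Back-project the hue histogram and let CamShift follow the peak
        cv::Mat hsv, backproj;
        cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
        cv::calcBackProject(std::vector<cv::Mat>{hsv}, channels, hist_roi, backproj, ranges, 1.0);
        cv::RotatedRect track_box = cv::CamShift(backproj, tracking_window, term_criteria);

        //Stop if the window collapses, since meanShift rejects a window with non-positive size
        if (tracking_window.area() <= 0)
            break;

        cv::rectangle(frame, track_box.boundingRect(), cv::Scalar(255, 255, 0), 2);
        cv::imshow("Window", frame);
    }
    return 0;
}

My thinking is that if even this reduced version drifts, the problem is in the histogram/back-projection parameters rather than in the dlib part.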

Thanks