How do you detect things moving out of picture in a video? [closed]

asked 2015-12-09 08:15:46 -0500

MartinJensen gravatar image

I have some code that turns people into contours and gives each one an x,y coordinate. How do I detect whether they are on their way out of the picture? Do I have to define some boundaries, like an ROI or something?

I'm pretty new to OpenCV, so keep that in mind.

My code is here:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;
using namespace std;

int main(int argc, char* argv[]){
VideoCapture cap; ("http://root:axis@");

    if (!cap.isOpened())  // if not success, exit program
    cout << "Cannot open the video cam" << endl;
    return -1;

double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video

cout << "Frame size : " << dWidth << " x " << dHeight << endl; // Display the height and width of the incoming picture

namedWindow("ThermalLiveFeed",CV_WINDOW_AUTOSIZE); //create a window called "ThermalLiveFeed"
namedWindow("ThresholdLiveFeed",CV_WINDOW_AUTOSIZE); //create a window called "ThresholdLiveFeed"

while (1)

    Mat frame;

    double thresh = 150;
    double maxVal = 255;

    bool bSuccess =; // read a new frame from video

     if (!bSuccess) //if not success, break loop
         cout << "Cannot read a frame from video stream" << endl;

    Mat grey;
    Mat dst;

    cv::cvtColor(frame, grey, CV_BGR2GRAY); //Convert the 3-channel video stream to 1-channel gray

    flip(frame, frame, 1);

    imshow("ThermalLiveFeed", frame); //show the frame in "ThermalLiveFeed" window

    threshold(grey, dst, thresh, maxVal, THRESH_BINARY); // Threshold the 1-channel picture

    imshow("ThresholdLiveFeed", dst); // Show the dst thresholded version of grey

        std::vector<std::vector<cv::Point> > contours;
        Mat imageClone; // Make an array called imageClone
        imageClone = dst.clone(); // Clone data from dst to imageClone
        findContours(imageClone, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE); // Use opencv command to find contours

        Mat drawImg(dst.size(), CV_8UC3, Scalar(0,0,0));

    for(size_t idx = 0; idx < contours.size(); idx++){
        int area = contourArea(contours[idx]);

        cout << idx << " area:" << (area << endl;

    if(area > 500){             // If the area of the blob is above 500 pixels run this

        drawContours(drawImg, contours, idx, Scalar(0,0,255)); // Draw line around contour
        Moments mu = moments(contours[idx], false);  // Create a fixed point in the middle of the contour
        Point2f mc = Point2f(mu.m10/mu.m00, mu.m01/mu.m00);
        cout << "Coordinates: " << mc.x << ", " << mc.y << endl; // Display the x and y coordinates of the image moment
        circle(drawImg, mc, 5,Scalar(0,255,0)); // Draw a green circle around the point

        namedWindow("findContours", WINDOW_AUTOSIZE); // Create window called findContours

        imshow("findContours", drawImg); // Display contours in the window findContours


    if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
        cout << "esc key is pressed by user" << endl;
   return 0;
edit retag flag offensive reopen merge delete

Closed for the following reason question is not relevant or outdated by sturkmen
close date 2020-10-22 14:51:48.492389


If you have the center point, then you can track it across frames, compute the difference between the old center and the new one, and from that estimate the direction of movement.

thdrksdfthmn gravatar imagethdrksdfthmn ( 2015-12-09 08:28:12 -0500 )edit