How to create an array of Mat indexed by frame number?

asked 2013-10-08 00:44:40 -0600

Shaban

Hi guys, I want to create an array of Mat indexed by the frame number, so that I can store every frame after an object is detected and compare the blob in the current frame with the blob from 3 frames before the current frame.

I've created one (blobarray), but it returns nothing. I think the code skips past the check if(readytotrain == 0).

So can you tell me what's wrong with my code?

I'd appreciate any help here. Thank you very much! :)

#include"stdafx.h"
#include<vector>
#include<iostream>
#include<opencv2/opencv.hpp>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>

int main(int argc, char *argv[])
{
    cv::Mat frame;
    cv::Mat fg;     
    cv::Mat blurred;
    cv::Mat threshfg;
    cv::Mat blob;
    cv::Mat blobarray[999999];
    cv::Mat bgmodel;                                            
    cv::namedWindow("Frame");   
    cv::namedWindow("Background Model");
    cv::namedWindow("Blob");
    cv::VideoCapture cap("campus.avi"); 

    cv::BackgroundSubtractorMOG2 bgs;                           

        bgs.nmixtures = 3;
        bgs.history = 1000;
        bgs.bShadowDetection = true;                            
        bgs.nShadowDetection = 0;                               
        bgs.fTau = 0.5;                                         

    std::vector<std::vector<cv::Point>> contours;               

    cv::CascadeClassifier human;
    assert(human.load("hogcascade_pedestrians.xml"));

    for(;;)
    {

        cap >> frame;                           
        std::cout << cap.get(CV_CAP_PROP_POS_FRAMES) << std::endl;
        cv::GaussianBlur(frame,blurred,cv::Size(3,3),0,0,cv::BORDER_DEFAULT);

        bgs.operator()(blurred,fg);                         
        bgs.getBackgroundImage(bgmodel);                                

        cv::erode(fg,fg,cv::Mat(),cv::Point(-1,-1),1);                         
        cv::dilate(fg,fg,cv::Mat(),cv::Point(-1,-1),3);       

        cv::threshold(fg,threshfg,70.0f,255,CV_THRESH_BINARY);

        cv::findContours(threshfg,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);
        cv::cvtColor(threshfg,blob,CV_GRAY2RGB);
        cv::drawContours(blob,contours,-1,cv::Scalar(255,255,255),CV_FILLED,8);

        int cmin = 20; 
        int cmax = 1000;
        std::vector<cv::Rect> rects;

        for(int cnum = 0; cnum < contours.size(); cnum++)
        {

            if(contours[cnum].size() > cmin && contours[cnum].size() < cmax)
            {       

                human.detectMultiScale(frame, rects);   

                int readytotrain = 0;

                if(rects.size() > 0)
                {

                    int pixblob = 0;
                    if(readytotrain = 0)
                    {
                        blobarray[(int)cap.get(CV_CAP_PROP_POS_FRAMES)] = blob;
                        readytotrain++;
                    }else if(readytotrain = 1){
                        blobarray[(int)cap.get(CV_CAP_PROP_POS_FRAMES)] = blob;
                        readytotrain++;
                    }else if(readytotrain = 2){
                        blobarray[(int)cap.get(CV_CAP_PROP_POS_FRAMES)] = blob;
                        readytotrain++;
                    }else{

                        blobarray[(int)cap.get(CV_CAP_PROP_POS_FRAMES)] = blob;
                        for(unsigned int r = 0; r < rects.size(); r++)
                        {
                            for(int cpos = 0; cpos < contours[cnum].size(); cpos++)
                            {

                                cv::Point3_ <uchar>* p = blobarray[(int)cap.get(CV_CAP_PROP_POS_FRAMES)-3].ptr<cv::Point3_ <uchar> >(contours[cnum][cpos].y, contours[cnum][cpos].x);
                                int a = p -> x;
                                int b = p -> y;
                                int c = p -> z;

                                if((a == 255) && (b == 255) && (c == 255))
                                {
                                    pixblob++;
                                }

                            }

                            std::cout << r << std::endl;
                            std::cout << pixblob << std::endl;
                            std::cout << contours[cnum].size() << std::endl;
                            std::cout << std::endl;
                        }
                    }
                }
            }
        }

        cv::imshow("Frame",frame);
        cv::imshow("Background Model",bgmodel);
        cv::imshow("Blob",blob);
        if(cv::waitKey(30) >= 0) break;
    }
    return 0;
}

Comments

You could try using a std::vector<Mat> and use the push_back method to store each frame. Also make sure you either clone() or copyTo() the frames, else you will be overwriting just a single frame each time.

SergioBasurco ( 2013-10-08 05:52:41 -0600 )

Can you give me a simple example?

Shaban ( 2013-10-08 15:55:49 -0600 )

Maybe I'm mistaken, but it seems you are just storing a reference to blob instead of a copy when you do the Mat assignment. Check here for Mat handling, and here for vector push_back. The push_back method will allow you not to care about the array (vector) size.

SergioBasurco ( 2013-10-09 04:22:58 -0600 )
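
A minimal sketch of the suggestion above (the name blobHistory and the fixed 3-frame lookback are just illustrative, not from the original code): keep a std::vector<cv::Mat>, push back a clone() of blob every frame, and index back three frames once enough frames have been stored.

std::vector<cv::Mat> blobHistory;          // grows as needed, replaces cv::Mat blobarray[999999]

// inside the capture loop, after blob has been drawn:
blobHistory.push_back(blob.clone());       // clone() makes a deep copy; a plain assignment
                                           // only copies the Mat header, so every element
                                           // would share (and overwrite) the same pixel data

if(blobHistory.size() > 3)
{
    // blob image from 3 frames before the current one
    cv::Mat past = blobHistory[blobHistory.size() - 4];
    // compare the current blob against past here, e.g. read its pixels at the contour points
}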