Use Optical flow to track Feature

asked 2016-06-04 05:33:32 -0600

Wei gravatar image

I use calcOpticalFlowPyrLK() to track keypoints between frames, but the tracking results are unstable. How can I improve it?
TrackOpticalFlow function:

// NOTE(review): these file-scope statics are never read in this function
// (locals fill their role); verify they are unused elsewhere before removing.
static vector<KeyPoint> currPoints,predictPoints;

/// Track `sceneKeyPoint` from the previous frame into `sceneMat` using
/// pyramidal Lucas-Kanade optical flow.
/// @param sceneMat        current frame (color; converted to gray here)
/// @param sceneKeyPoint   keypoints from the previous frame
/// @param predictKeyPoint [out] keypoints that survived tracking, at their
///                        new positions (lost/outlier tracks are dropped)
void trackingOpticalFlow(const cv::Mat sceneMat, const vector<KeyPoint> sceneKeyPoint, vector<KeyPoint> *predictKeyPoint){
    if( sceneMat.empty() || sceneKeyPoint.empty()) return;
    vector<uchar> status;
    vector<float> err;
    vector<Point2f> prevPts, nextPts;
    cv::Mat sceneMat_gray;

    // 9x9 search window; a few pyramid levels suffice — the original
    // maxLevel of 21 exceeds what any realistic frame size can supply.
    cv::Size winSize = cv::Size(9, 9);
    int maxLevel = 5;
    cv::TermCriteria terminationCriteria = cv::TermCriteria( CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 20, 0.03);

    this->getGrayMat(sceneMat, sceneMat_gray);

    // First frame: nothing to track against yet — just remember it.
    if(m_trackPrev.empty()){
        sceneMat_gray.copyTo(m_trackPrev);
        return;
    }

    // Build the previous-frame pyramid only once; on later calls it is
    // recycled from the previous invocation via the swap at the bottom.
    // buildOpticalFlowPyramid returns the number of levels actually
    // built, which must bound the level passed to calcOpticalFlowPyrLK.
    int prevLevels = maxLevel;
    if(prevPyr.empty()){
        prevLevels = cv::buildOpticalFlowPyramid(m_trackPrev, prevPyr, winSize, maxLevel, true);
    }

    KeyPoint::convert(sceneKeyPoint, prevPts);

    int nextLevels = cv::buildOpticalFlowPyramid(sceneMat_gray, nextPyr, winSize, maxLevel, true);

    // FIX: flags = 0. The original passed CV_LKFLOW_PYR_B_READY, a legacy
    // C-API flag whose value collides with OPTFLOW_LK_GET_MIN_EIGENVALS and
    // silently changes the meaning of `err` and of the 1e-10 threshold.
    // With prebuilt pyramids as inputs no readiness flag is needed.
    cv::calcOpticalFlowPyrLK(prevPyr, nextPyr, prevPts, nextPts, status, err,
                             winSize, std::min(prevLevels, nextLevels),
                             terminationCriteria, 0);

    // Keep only points that tracked successfully and did not jump more
    // than 100 px (gross-outlier rejection).
    // BUG FIX: the original computed this filtered set and then returned
    // the raw, unfiltered results, so lost tracks leaked to the caller.
    std::vector<cv::Point2f> trackedPts;
    for (size_t i = 0; i < status.size(); i++)
    {
        if (status[i] && norm(nextPts[i] - prevPts[i]) <= 100)
        {
            trackedPts.push_back(nextPts[i]);
        }
    }

    // NOTE(review): filtering drops indices, so any index-based
    // correspondence kept by the caller (e.g. a match list) must be
    // rebuilt — confirm at the call site.
    KeyPoint::convert(trackedPts, *predictKeyPoint);
    swap(m_trackPrev, sceneMat_gray);
    prevPyr.swap(nextPyr);
}

Input mat in video:

/// Process one video frame: when not yet tracking, detect features and
/// match them against the pattern; otherwise propagate the previous
/// frame's keypoints with optical flow. Finally estimate the
/// pattern-to-scene homography.
/// @param sceneMat current frame; tracked points are drawn onto it.
void runTrack(cv::Mat sceneMat){
   cv::Mat sceneMatGray;
   Mat sceneDescriptor;
   this->getGrayMat(sceneMat, sceneMatGray);
   if(!m_isTrack){
       // Detection path: fresh features + kNN descriptor matching.
       this->detectFeatures(sceneMatGray, &m_trackPreKeypoints, &sceneDescriptor);

       if(m_trackPreKeypoints.empty() || sceneDescriptor.empty()) {
            m_isDetectObject = false;
            return;
       }else{
            this->matchByKnn(m_patternDescriptor, sceneDescriptor, &m_trackMatches);
       }

       // A homography needs at least 4 point correspondences.
       if( m_trackMatches.size() < 4){
            m_isDetectObject = false;
            return;
       }

   }else{
       // Tracking path: propagate last frame's keypoints by optical flow.
       vector<KeyPoint> outKeypoints;
       this->trackingOpticalFlow(sceneMatGray, m_trackPreKeypoints, &outKeypoints);
       if(outKeypoints.empty()) return;

       // Visualize: red = previous positions, blue = tracked positions.
       // (size_t counters avoid the original signed/unsigned comparison.)
       for(size_t i = 0; i < m_trackPreKeypoints.size(); i++)
             cv::circle(sceneMat, m_trackPreKeypoints[i].pt, 2, CV_RGB(255, 0, 0));

       for(size_t i = 0; i < outKeypoints.size(); i++)
              cv::circle(sceneMat, outKeypoints[i].pt, 2, CV_RGB(0, 0, 255));

        m_trackPreKeypoints.swap(outKeypoints);

       // NOTE(review): m_trackMatches is NOT refreshed here, so its
       // indices still refer to the detection frame's keypoints while
       // m_trackPreKeypoints may have changed size/order — a likely
       // source of the reported instability. Rebuild the matches (or
       // preserve index correspondence through tracking) before the
       // homography below; confirm against getHomography's contract.
   }

   cv::Mat homography;
   this->getHomography(m_patternKeypoints, m_trackPreKeypoints, m_trackMatches, &homography);
}
edit retag flag offensive close merge delete

Comments

cv::calcOpticalFlowPyrLK takes as the first argument the previous image and as a second argument the next image. Instead, you pass optical flow pyramids. Why do you do that?

MrX gravatar imageMrX ( 2016-06-04 19:11:51 -0600 )edit

Passing prebuilt pyramids to calcOpticalFlowPyrLK seems to make it run faster, since the previous frame's pyramid can be reused.

Wei gravatar imageWei ( 2016-07-14 23:34:57 -0600 )edit