I want to use the calcOpticalFlowPyrLK()
to track Points I found after a (larger) detection/matching step on an Image.
The first recognition works with FAST/SIFT matching, and the matched Points are stored for the OpticalFlow Tracking.
I tried to eliminate Points where the status is not 1. After that I want to calculate the Homography between the old Point set from the recognition and the new Point set from the tracking. Afterwards, I want to apply the perspective Transformation to the Corner Points so I can mark the detected Object with a rectangle.
Enough talk here the Code and my Output:
std::cout << "****KLT TRACKING ****" << std::endl;
cv::Mat firstImg = cv::imread(imgPathtest7, -1);
cv::Mat firstgry;
cvtColor(firstImg, firstgry, CV_BGR2GRAY);
std::vector<cv::Point2f> estPoints;
std::vector<uchar> stat;
std::vector<float> errs;
cv::calcOpticalFlowPyrLK(gry, firstgry, ransacs, estPoints, stat, errs);
std::vector<cv::Point2f> estCorners;
for(int i = 0; i < estPoints.size(); i++){
if(stat[i]){
cv::circle(firstImg, estPoints[i], 5, cv::Scalar(0, 0, 255), 2);
std::cout << "Estimated Point:" << estPoints[i] << std::endl;
std::cout << "Status: " << stat[i] << std::endl;
} else {
estPoints.erase(estPoints.begin() + i);
ransacs.erase(ransacs.begin() + i);
}
std::cout << "Error: " << errs[i] << std::endl;
}
std::cout << "Size of ransacs/estPoints: " << ransacs.size() << " " << estPoints.size() << std::endl;
cv::Mat f = cv::findHomography(ransacs, estPoints);
std::cout << "Homo: " << f << std::endl;
cv::perspectiveTransform(scene_corners, estCorners, f);
cv::line(firstImg, estCorners[0], estCorners[1], cv::Scalar(0, 255, 0), 4);
cv::line(firstImg, estCorners[1], estCorners[2], cv::Scalar(0, 255, 0), 4);
cv::line(firstImg, estCorners[2], estCorners[3], cv::Scalar(0, 255, 0), 4);
cv::line(firstImg, estCorners[3], estCorners[0], cv::Scalar(0, 255, 0), 4);
cv::imshow("First Iteration", firstImg);
std::cout << "**** NEXT KLT TRACK ****" << std::endl;
//Second Image
cv::Mat nextMatchImg = cv::imread(imgPathtest6, -1);//STRING)
cv::Mat nextGry;
cvtColor(nextMatchImg, nextGry, CV_BGR2GRAY);
std::vector<cv::Point2f> estimatedPoints;
std::vector<uchar> status;
std::vector<float> err;
cv::calcOpticalFlowPyrLK(firstgry, nextGry, estPoints, estimatedPoints, status, err);
std::vector<cv::Point2f> estimatedCorners;
for(int i = 0; i < estimatedPoints.size(); i++){
if(status[i]){
cv::circle(nextMatchImg, estimatedPoints[i], 5, cv::Scalar(0, 0, 255), 2);
std::cout << "Estimated Point:" << estimatedPoints[i] << std::endl;
std::cout << "Status: " << status[i] << std::endl;
}else{
estimatedPoints.erase(estimatedPoints.begin() + i);
estPoints.erase(estPoints.begin() + i);
}
std::cout << "Error: " << err[i] << std::endl;
}
std::cout << "Size of ransacs/estimatedPoints: " << ransacs.size() << " " << estimatedPoints.size() << std::endl;
cv::Mat f2 = cv::findHomography(estPoints, estimatedPoints);
std::cout << "Homo: " << f2 << std::endl;
cv::perspectiveTransform(estCorners, estimatedCorners, f2);
cv::line(nextMatchImg, estimatedCorners[0], estimatedCorners[1], cv::Scalar(0, 255, 0), 4);
cv::line(nextMatchImg, estimatedCorners[1], estimatedCorners[2], cv::Scalar(0, 255, 0), 4);
cv::line(nextMatchImg, estimatedCorners[2], estimatedCorners[3], cv::Scalar(0, 255, 0), 4);
cv::line(nextMatchImg, estimatedCorners[3], estimatedCorners[0], cv::Scalar(0, 255, 0), 4);
cv::imshow("Example KLT", nextMatchImg);
After that I get this Result:
The left Image is the Result after Recognition. The next two Images use Tracking via calcOpticalFlowPyrLK.
Some of the previously detected Points end up at a totally different place in the next Image, even though the Status says everything is fine. Still, the second result is acceptable. In the last Image, however, the result is far too bad: as seen, the rectangle is drawn somewhere else entirely, not around the detected Object.
I hope somebody knows what I am doing wrong, or can help me find a better solution.