Hi everyone,
I have been trying to develop a simple feature tracking program. The user outlines an area on the screen with their mouse, and a mask is created for this area and passed to goodFeaturesToTrack. The features found by the function are then drawn on the screen (represented by the blue circles).
Next I pass the feature vector returned by the function to calcOpticalFlowPyrLK and draw the resulting vector of points on the screen (represented by the green circles). Although the program tracks the optical flow of the features correctly, for some reason the new features do not seem to "track" along with the video.
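Boiled down, the pipeline I am aiming for looks roughly like this (simplified, with the mouse handling left out; grayPrev, grayNext and selection stand in for my actual globals):

// Build a mask that is white only inside the user-drawn rectangle.
cv::Mat mask = cv::Mat::zeros(grayPrev.size(), CV_8UC1);
mask(selection) = cv::Scalar(255);               // selection is a cv::Rect

// Detect corners inside the mask on the previous (greyscale) frame.
std::vector<cv::Point2f> prevPts, nextPts;
cv::goodFeaturesToTrack(grayPrev, prevPts, 200, 0.1, 0.1, mask);

// Track them into the next frame with pyramidal Lucas-Kanade.
std::vector<uchar> status;
std::vector<float> err;
cv::calcOpticalFlowPyrLK(grayPrev, grayNext, prevPts, nextPts, status, err,
                         cv::Size(30, 30), 5);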
I feel as though it is a small mistake in my logic, but I just can't seem to pin it down, and I would really appreciate some help from the community.
I have posted my code below; apologies in advance for the global variables and messy structure. I am just testing at the moment and plan to clean it up and convert it to a class-based structure as soon as I get it running.
I have also uploaded a YouTube video (link below) that demonstrates the behavior I am fighting.
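For reference, the functions below rely on the usual OpenCV headers and a handful of globals; trimmed to the ones actually touched here, they look roughly like this (the exact types and values in my project may differ slightly):

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

// Globals used by the functions below (approximate declarations).
bool drawingBox = false, featuresFound = false, targetAcquired = false, destroyBox = false;
CvRect box;
int boxCounter = 0, currentFrame = 0;
cv::Point objectLocation;
cv::Mat prevFrame, nextFrame, prevFrame_1C, nextFrame_1C;
std::vector<cv::Point2f> originalFeatures, baseFeatures, newFeatures;
std::vector<uchar> opticalFlowFeatures;       // status output of calcOpticalFlowPyrLK
std::vector<float> opticalFlowFeaturesError;  // error output of calcOpticalFlowPyrLK
cv::TermCriteria opticalFlowTermination(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.3);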
void draw(cv::Mat image, CvRect rectangle)
{
    if (drawingBox)
    {
        // Draw the selection rectangle while the user is still dragging.
        cv::rectangle(image, cv::Point(box.x, box.y),
                      cv::Point(box.x + box.width, box.y + box.height),
                      cv::Scalar(225, 238, 81), 2);
        CvRect rectangle2 = cvRect(box.x, box.y, box.width, box.height);
    }
    if (featuresFound)
    {
        for (size_t i = 0; i < originalFeatures.size(); i++)
        {
            // Blue circles: detected features; green circles: positions from the optical flow.
            cv::circle(image, baseFeatures[i], 4, cv::Scalar(255, 0, 0), 1, 8, 0);
            cv::circle(image, newFeatures[i], 4, cv::Scalar(0, 255, 0), 1, 8, 0);
            cv::line(image, baseFeatures[i], newFeatures[i], cv::Scalar(255, 0, 0), 2, CV_AA);
        }
    }
}
void findFeatures(cv::Mat mask)
{
    if (!featuresFound && targetAcquired)
    {
        // Detect corners only inside the user-selected region defined by the mask.
        cv::goodFeaturesToTrack(prevFrame_1C, baseFeatures, 200, 0.1, 0.1, mask);
        originalFeatures = baseFeatures;
        featuresFound = true;

        std::cout << "Number of Corners Detected: " << originalFeatures.size() << std::endl;
        for (size_t i = 0; i < originalFeatures.size(); i++)
        {
            std::cout << "Corner Location " << i << ": "
                      << originalFeatures[i].x << "," << originalFeatures[i].y << std::endl;
        }
    }
}
void trackFeatures()
{
    // Track the current feature set from the previous frame into the next one,
    // then replace the feature set with the tracked positions.
    cv::calcOpticalFlowPyrLK(prevFrame_1C, nextFrame_1C, originalFeatures, newFeatures,
                             opticalFlowFeatures, opticalFlowFeaturesError,
                             cv::Size(30, 30), 5, opticalFlowTermination);
    originalFeatures = newFeatures;
}
void mouseCallback(int event, int x, int y, int flags, void *param)
{
    // Frame handed in through the callback's user data (currently unused here).
    cv::Mat frame = *((cv::Mat *)param);

    switch (event)
    {
    case CV_EVENT_MOUSEMOVE:
        {
            // Grow the selection box while the left button is held down.
            if (drawingBox)
            {
                box.width = x - box.x;
                box.height = y - box.y;
            }
        }
        break;
    case CV_EVENT_LBUTTONDOWN:
        {
            // Start a new selection at the click position.
            drawingBox = true;
            box = cvRect(x, y, 0, 0);
            targetAcquired = false;
            cv::destroyWindow("Selection");
        }
        break;
    case CV_EVENT_LBUTTONUP:
        {
            // Finish the selection: normalise negative width/height and mark the target.
            drawingBox = false;
            featuresFound = false;
            boxCounter++;
            std::cout << "Box " << boxCounter << std::endl;
            std::cout << "Box Coordinates: " << box.x << "," << box.y << std::endl;
            std::cout << "Box Height: " << box.height << std::endl;
            std::cout << "Box Width: " << box.width << std::endl << std::endl;
            if (box.width < 0)
            {
                box.x += box.width;
                box.width *= -1;
            }
            if (box.height < 0)
            {
                box.y += box.height;
                box.height *= -1;
            }
            objectLocation.x = box.x;
            objectLocation.y = box.y;
            targetAcquired = true;
        }
        break;
    case CV_EVENT_RBUTTONUP:
        {
            // Right click tears everything down.
            destroyBox = true;
        }
        break;
    }
}
int main()
{
    const char *name = "Boundary Box";
    cv::namedWindow(name);

    cv::VideoCapture camera;
    cv::Mat cameraFrame;
    int cameraNumber = 0;

    camera.open(cameraNumber);
    camera >> cameraFrame;

    cv::Mat mask = cv::Mat::zeros(cameraFrame.size(), CV_8UC1);
    cv::Mat clearMask = cv::Mat::zeros(cameraFrame.size(), CV_8UC1);

    if (!camera.isOpened())
    {
        std::cerr << "ERROR: Could not access the camera or video!" << std::endl;
    }

    cv::setMouseCallback(name, mouseCallback, &cameraFrame);

    while (true)
    {
        if (destroyBox)
        {
            cv::destroyAllWindows();
            break;
        }

        camera >> cameraFrame;
        if (cameraFrame.empty())
        {
            std::cerr << "ERROR: Could not grab a camera frame." << std::endl;
            exit(1);
        }

        // Grab two consecutive frames and convert them to greyscale for the
        // feature detection and optical flow calls.
        camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame);
        camera >> prevFrame;
        cv::cvtColor(prevFrame, prevFrame_1C, cv::COLOR_BGR2GRAY);

        camera.set(CV_CAP_PROP_POS_FRAMES, currentFrame++);
        camera >> nextFrame;
        cv::cvtColor(nextFrame, nextFrame_1C, cv::COLOR_BGR2GRAY);

        if (targetAcquired)
        {
            // White-out the selected rectangle in the mask, detect features inside it
            // (only the first time), then reset the mask and track the features.
            cv::Mat roi(mask, cv::Rect(box.x, box.y, box.width, box.height));
            roi = cv::Scalar(255, 255, 255);
            findFeatures(mask);
            clearMask.copyTo(mask);
            trackFeatures();
        }

        draw(cameraFrame, box);
        cv::imshow(name, cameraFrame);
        cv::waitKey(20);
    }

    cv::destroyWindow(name);
    return 0;
}