If I understood your question correctly, and since you said nothing about camera calibration, then if you just want to find the distance between those two objects simply:

1. Find the respective contours of your masked objects (Python, C++).
2. Extract the mass centers (refer to the links posted above).
3. Calculate the Euclidean distance between the mass centers.

Try out the solution proposed by @sjhalayka as well.
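If you are working in Python, a minimal sketch of those three steps could look like the code below (a rough sketch only: it reuses the cards.png file name from the C++ version, and the findContours unpacking assumes the OpenCV 4.x Python API; 3.x returns an extra value first):

import cv2
import numpy as np

# load the masked/binary image (same cards.png as the C++ version) and threshold it
img = cv2.imread("cards.png", cv2.IMREAD_GRAYSCALE)
_, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

# step 1: find the contours (OpenCV 4.x returns (contours, hierarchy))
contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# keep only the two largest contours, one per object
largest_two = sorted(contours, key=cv2.contourArea, reverse=True)[:2]

# step 2: extract the mass centers from the contour moments
centers = []
for c in largest_two:
    m = cv2.moments(c)
    centers.append((m["m10"] / m["m00"], m["m01"] / m["m00"]))

# step 3: Euclidean distance between the two mass centers, in pixels
distance = np.linalg.norm(np.subtract(centers[0], centers[1]))
print("Distance (in pixels):", distance)

The C++ version in the EDIT below follows the same steps.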
EDIT
Most of the heavy lifting was done by @sjhalayka; I just simplified the logic. The original code can be found here, in without_contours.cpp.
The bulk of the change was getting rid of his/her use of sets and maps just to keep track of the two largest contours. Other minor edits have been made as well and are noted in the comments.
Disclaimer: The logic shown below was tailored to the OP's current question. Should he add an extra object and want the distances between all three, the same logic can be used, but the implementation details are left as a challenge for him to tackle.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <tuple> // std::tuple, std::make_tuple, std::get
using namespace cv;
using namespace std;
// since it's just two elements, a tuple return should suffice
tuple<size_t, size_t> getTheTwoLargestContours(const vector<vector<Point>>& contours)
{
    size_t largestLoc = 0, secondLargestLoc = 0;
    double largestArea = 0, secondLargestArea = 0;

    for(size_t index = 0; index < contours.size(); ++index)
    {
        double area = contourArea(contours[index]);

        // first check to see if this area is the new largest
        if(area > largestArea)
        {
            // the previous largest becomes the second largest
            secondLargestArea = largestArea;
            secondLargestLoc = largestLoc;

            largestArea = area;
            largestLoc = index;
        }
        else if(area > secondLargestArea) // if not, it may still beat the current second largest
        {
            secondLargestArea = area;
            secondLargestLoc = index;
        }
    }

    return make_tuple(largestLoc, secondLargestLoc);
}
int main(void)
{
    // OpenCV is shying away from using the CV_ prefix, so the plain enum names are used below
    Mat frame = imread("cards.png", IMREAD_GRAYSCALE);

    if(frame.empty())
    {
        cout << "Error: could not read cards.png" << endl;
        return 0;
    }

    threshold(frame, frame, 127, 255, THRESH_BINARY);
    imshow("f", frame);

    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(frame, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));

    if(contours.size() < 2)
    {
        cout << "Error: must have 2 or more contours." << endl;
        return 0;
    }
    tuple<size_t, size_t> locations = getTheTwoLargestContours(contours);

    /*
    std::get<index>(tuple) is how you access tuple elements.
    Read more here: http://www.geeksforgeeks.org/tuples-in-c/
    Index 0 of locations is where the largest contour is located.
    So I calculate its moments along with its center.
    This logic can be converted into a loop should more contours be needed.
    */
    Moments mu = moments(contours[get<0>(locations)], false);
    Point2d largestMassCenter = Point2d(mu.m10 / mu.m00, mu.m01 / mu.m00);

    // do the same thing for the second largest
    mu = moments(contours[get<1>(locations)], false);
    Point2d secondLargestMassCenter = Point2d(mu.m10 / mu.m00, mu.m01 / mu.m00);

    // OpenCV has a norm function for calculating the Euclidean distance
    double distance = norm(largestMassCenter - secondLargestMassCenter);
    cout << "Distance (in pixels): " << distance << endl;
    Mat output = Mat::zeros(frame.size(), CV_8UC3);

    // passing -1 draws all contours, so no need for the for loop and repetitive function calls
    drawContours(output, contours, -1, Scalar(0, 127, 255), 1, LINE_8, hierarchy);
    line(output, largestMassCenter, secondLargestMassCenter, Scalar(255, 127, 0), 2);

    imshow("f2", output);
    waitKey();
    destroyAllWindows();

    return 0;
}
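To compile it, something along these lines should work (assuming OpenCV was installed with pkg-config support; swap opencv4 for opencv on a 3.x install, and adjust the file name to whatever you saved it as):

g++ -std=c++11 distance.cpp -o distance $(pkg-config --cflags --libs opencv4)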