
stan123's profile - activity

2019-12-19 05:13:11 -0600 received badge  Notable Question (source)
2018-12-27 18:51:26 -0600 received badge  Popular Question (source)
2017-03-29 01:58:45 -0600 received badge  Student (source)
2017-03-28 07:52:13 -0600 commented question Contour segmentation

So I solved the problem by calculating the curvature of the contour points (referring to http://stackoverflow.com/questions/32...).

2017-03-23 06:46:38 -0600 commented question Contour segmentation

So the upper images are just to show the problem. The real shapes can be more complicated. Finding the contours isn't a problem, but I don't know how I can divide a contour into segments.

2017-03-23 03:56:00 -0600 asked a question Contour segmentation

Hey there,

I have a contour which consists of curved segments and straight segments. Is there any possibility to segment the contour into the curved and straight parts? This is an example of a contour:

image description

I would like to have a segmentation like this:

image description

Do you have any idea how I could solve such a problem?

Thank you very much and best regards

The solution is to calculate the curvature:

vector<double> getCurvature(vector<Point> const& tContourPoints, int tStepSize)
{
    // Curvature at every point of a closed contour, estimated with central
    // differences over a window of +/- tStepSize points.
    // Assumes the usual OpenCV includes and "using namespace cv; using namespace std;".
    vector<double> rVecCurvature( tContourPoints.size() );

    if ((int)tContourPoints.size() < tStepSize)
    {
        return rVecCurvature;
    }

    for (int i = 0; i < (int)tContourPoints.size(); i++)
    {
        const Point2f pos = tContourPoints[i];

        // neighbouring indices, wrapped around because the contour is closed
        int iminus = i - tStepSize;
        int iplus  = i + tStepSize;

        Point2f pminus;
        Point2f pplus;

        if (iminus < 0)
        {
            pminus = tContourPoints[iminus + (int)tContourPoints.size()];
        }
        else
        {
            pminus = tContourPoints[iminus];
        }

        if (iplus >= (int)tContourPoints.size())   // >= : an index equal to size() is already out of range
        {
            pplus = tContourPoints[iplus - (int)tContourPoints.size()];
        }
        else
        {
            pplus = tContourPoints[iplus];
        }

        // central-difference approximations of the 1st and 2nd derivative
        Point2f a1stDerivative;
        Point2f a2ndDerivative;
        a1stDerivative.x = (pplus.x -           pminus.x) / (iplus - iminus);
        a1stDerivative.y = (pplus.y -           pminus.y) / (iplus - iminus);
        a2ndDerivative.x = (pplus.x - 2*pos.x + pminus.x) / (float)(tStepSize * tStepSize);
        a2ndDerivative.y = (pplus.y - 2*pos.y + pminus.y) / (float)(tStepSize * tStepSize);

        // curvature = |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2); note that the
        // denominator is built from the 1st derivative, not the 2nd
        double acurvature;
        double adivisor = a1stDerivative.x*a1stDerivative.x + a1stDerivative.y*a1stDerivative.y;
        if (std::abs(adivisor) > 10e-8)
        {
            acurvature = std::abs(a2ndDerivative.y*a1stDerivative.x - a2ndDerivative.x*a1stDerivative.y) / pow(adivisor, 3.0/2.0);
        }
        else
        {
            acurvature = numeric_limits<double>::infinity();
        }

        rVecCurvature[i] = acurvature;
    }
    return rVecCurvature;
}

and then you can segment it with a specific threshold value on the curvature.
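
For illustration, here is a minimal sketch of that thresholding step (the helper name segmentByCurvature and its parameters are mine, not part of the original post): points whose curvature stays below the threshold are treated as straight, the rest as curved, and consecutive points with the same label form one segment.

// Sketch only: split a contour into low-curvature ("straight") and
// high-curvature ("curved") runs, using getCurvature() from above.
// Assumes the same includes / using-directives as the snippet above.
vector<vector<Point> > segmentByCurvature( vector<Point> const& tContourPoints,
                                           int tStepSize, double tThreshold )
{
    vector<vector<Point> > rSegments;
    vector<double> aCurvature = getCurvature( tContourPoints, tStepSize );
    if (aCurvature.empty())
    {
        return rSegments;
    }

    bool aPrevCurved = aCurvature[0] > tThreshold;
    rSegments.push_back( vector<Point>() );

    for (size_t i = 0; i < tContourPoints.size(); i++)
    {
        bool aCurved = aCurvature[i] > tThreshold;   // high curvature -> curved part
        if (aCurved != aPrevCurved)                  // label changed -> start a new segment
        {
            rSegments.push_back( vector<Point>() );
            aPrevCurved = aCurved;
        }
        rSegments.back().push_back( tContourPoints[i] );
    }
    // On a closed contour the first and the last segment may belong together
    // and can be merged if they carry the same label.
    return rSegments;
}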

2017-01-12 23:54:30 -0600 commented question Problems with feature detection and Homography

I'm sorry, yesterday I wasn't able to attach the files. Today I could ^^

2017-01-12 07:46:58 -0600 asked a question Problems with feature detection and Homography

Hey there,

I found the example for feature detection and homography here:

So I copied the code and tried it with my own pictures, but it doesn't work. I get suitable features in my object image, but in the scene image there seem to be too few features for the matching, I guess...

// OpenCV 2.x-style headers, matching the API constants used below
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <cstdio>
#include <iostream>
#include <vector>

using namespace cv;

int main(int argc, char** argv)
{

Mat img_object = imread("/data/Images/HappyLittleFish_teil.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat img_scene = imread("/data/Images/HappyLittleFish.png", CV_LOAD_IMAGE_GRAYSCALE);

if (!img_object.data || !img_scene.data)
{
    std::cout << " --(!) Error reading images " << std::endl; return -1;
}

//-- Step 1: Detect the keypoints using SURF Detector
Ptr<FeatureDetector> detector = FeatureDetector::create("ORB");

std::vector<KeyPoint> keypoints_object, keypoints_scene;

detector->detect(img_object, keypoints_object);
detector->detect(img_scene, keypoints_scene);

//-- Step 2: Calculate descriptors (feature vectors)
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");

Mat descriptors_object, descriptors_scene;

extractor->compute(img_object, keypoints_object, descriptors_object);
extractor->compute(img_scene, keypoints_scene, descriptors_scene);

//-- Step 3: Matching descriptor vectors
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
std::vector< DMatch > matches;
matcher->match(descriptors_object, descriptors_scene, matches);

double max_dist = 0; double min_dist = 100;

//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
    double dist = matches[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
}

printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);

//-- Draw only "good" matches (i.e. whose distance is less than 8*min_dist )
std::vector< DMatch > good_matches;

for (int i = 0; i < descriptors_object.rows; i++)
{
    if (matches[i].distance < 8 * min_dist)
    {
        good_matches.push_back(matches[i]);
    }
}

Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;

for (size_t i = 0; i < good_matches.size(); i++)
{
    //-- Get the keypoints from the good matches
    obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
    scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}

Mat H = findHomography(obj, scene, CV_RANSAC);

//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(img_object.cols, 0);
obj_corners[2] = cvPoint(img_object.cols, img_object.rows);
obj_corners[3] = cvPoint(0, img_object.rows);
std::vector<Point2f> scene_corners(4);

perspectiveTransform(obj_corners, scene_corners, H);

//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);

//-- Show detected matches
namedWindow( "img", WINDOW_NORMAL );
imshow("img", img_matches);

waitKey(0);
return 0;
}

My scene image is HappyLittleFish.png image description

The object image is just a part of the scene image, like the fin of the fish. image description

So does anyone know where the problem is?
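
One common thing to check with these ORB / BruteForce-Hamming examples (a suggestion on my part, not part of the original post): filter the matches with a ratio test via knnMatch instead of the min_dist heuristic, and only call findHomography when enough good matches survive, since it needs at least four point pairs.

// Sketch of an alternative filtering step, assuming the matcher, descriptor
// matrices and variable names from the code above; the threshold is illustrative.
std::vector< std::vector<DMatch> > knn_matches;
matcher->knnMatch(descriptors_object, descriptors_scene, knn_matches, 2);

std::vector<DMatch> good_matches;
const float ratio_thresh = 0.75f;              // Lowe's ratio test
for (size_t i = 0; i < knn_matches.size(); i++)
{
    if (knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
    {
        good_matches.push_back(knn_matches[i][0]);
    }
}

if (good_matches.size() < 4)                   // findHomography needs at least 4 correspondences
{
    std::cout << "Not enough good matches: " << good_matches.size() << std::endl;
    return -1;
}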

2016-11-24 09:27:38 -0600 commented question compare the size of two contours

Thanks for the very helpful answers. My problem was the factor, which was not correct, so the area and the length of the contour weren't correct either :-/ sorry for that ^^

2016-11-24 09:25:23 -0600 asked a question skewness of a contour

Hey there :-)

I calculated the orientation of a contour with the image moments. As a result I get an angle which is defined between -pi and pi, but this angle has an indetermination at -pi and pi. So my idea was to calculate the skewness with the 3rd-order image moments, but I don't really know how to do that. Here link text the author does the same, but I don't really understand how he calculates the skewness, because he/she uses a Matlab function which I don't know :-/

Does anybody know more about that problem? I would be very thankful for your help! Best regards
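
For what it's worth, a minimal sketch of how the skewness could be computed from the 3rd-order central moments that cv::moments already returns (this is my reading of the approach, not taken from the linked post): the skewness along an axis is the third central moment divided by the cube of the standard deviation, and the sign of the skewness along the principal axis could then be used to resolve the pi ambiguity of the orientation.

// Sketch: skewness of a contour along x and y from 3rd-order central moments.
// "contour" is a placeholder for a vector<Point>; assumes the usual OpenCV includes.
Moments m = moments( contour, true );

double sigma_x = std::sqrt( m.mu20 / m.m00 );   // standard deviations from the 2nd-order moments
double sigma_y = std::sqrt( m.mu02 / m.m00 );

double skew_x = ( m.mu30 / m.m00 ) / ( sigma_x * sigma_x * sigma_x );
double skew_y = ( m.mu03 / m.m00 ) / ( sigma_y * sigma_y * sigma_y );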

2016-11-22 00:06:55 -0600 received badge  Enthusiast
2016-11-21 04:46:54 -0600 commented question compare the size of two contours

The calibration step is outside of OpenCV. I just get the factors and work with them.

2016-11-21 04:26:50 -0600 commented question compare the size of two contours

Thanks for the response :-)

 mImage = imread( tImage, CV_LOAD_IMAGE_GRAYSCALE );   // findContours needs a single-channel image
 findContours( mImage, con_c1, RETR_LIST, CHAIN_APPROX_SIMPLE, Point( 0, 0 ) );
 double l_c1 = arcLength( con_1, true );               // con_1: the contour selected from con_c1

So this is what I do at the moment. The second contour is handled the same way. What do you mean by float Points?

2016-11-21 04:01:02 -0600 commented question compare the size of two contours

So I thought l_c1 and l_c2 should be nearly the same, but they are not :-/

2016-11-21 04:00:35 -0600 commented question compare the size of two contours
  • Why shouldn't I write loops like that -> performance?
  • I thought I just fill the contour with white pixels; the rest should be black.
  • Yes, I think you are right ^^
  • I don't rescale the images; I just have the factors from a calibration step. So I know that in the first image each mm is represented by 5 pixels and in the second image each mm is represented by 8.71 pixels. In both images I have nearly the same contour of the same object, and I want to know whether they have the same size (for example the area in mm^2 or the perimeter in mm) or not.

I tried arcLength in that way:

   double l_c1 = arcLength(con_1, true);
   double l_c2 = arcLength(con_2, true);
   cout << "l_c1 = " << l_c1/ fac1 << endl; //fac1 = 8.71
   cout << "l_c2 = " << l_c2/ fac2 << endl; //fac2 = 5
2016-11-21 03:42:30 -0600 commented question compare the size of two contours

Oh sorry, I forgot to define that. I mean the area of both contours. So I will try contourArea and maybe Hu moments? Thank you for your response!

2016-11-21 03:31:29 -0600 asked a question compare the size of two contours

Hey there

I found the contours in two images. The first image has a factor of 5 pixels/mm. The second image has a factor of 8.71 pixels/mm. I want to compare the size of the contours which I found in the two images, but to do that I have to rescale them so that they both have the same scale.

I tried to count the pixels which are inside my contour and divide the count by the factor, but the result isn't correct:

   // draw the selected contour filled in white on a black canvas
   Mat contoure_tmp = Mat::zeros( tImage.size(), CV_8UC3 );
   vector<Vec4i> ahierarchy;
   drawContours( contoure_tmp, tcontur, tcontour_index, Scalar(255,255,255), CV_FILLED, 8, ahierarchy, 0, Point() );
   imshow( "tmp", contoure_tmp );
   waitKey( 0 );

   // count the white pixels, i.e. the pixels inside the contour
   int aarea_pixel_count = 0;
   for( int y = 0; y < contoure_tmp.rows; y++ )
   {
      for( int x = 0; x < contoure_tmp.cols; x++ )
      {
         Vec3b &intensity = contoure_tmp.at< Vec3b >( y, x );
         bool ablack = false;
         for( int k = 0; k < contoure_tmp.channels(); k++ )
         {
            if( intensity.val[ k ] != 255 )
            {
               ablack = true;   // at least one channel is not white
            }
         }
         if( ablack == false )
         {
            aarea_pixel_count = aarea_pixel_count + 1;
         }
      }
   }

   return aarea_pixel_count;

Does anyone have an idea how I can do this in another way? Thank you very much!
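
For reference, a minimal sketch of the scaling step with contourArea and arcLength, assuming the contours and the pixels-per-mm factors mentioned above (the variable names are illustrative): an area in pixels has to be divided by the factor squared, a length in pixels only by the factor itself.

   // Sketch: compare two contours found in images with different resolutions.
   // fac1 and fac2 are the pixels-per-mm factors of the image each contour came from.
   double area1_mm2 = contourArea(con_1) / (fac1 * fac1);   // area scales with the square of the factor
   double area2_mm2 = contourArea(con_2) / (fac2 * fac2);

   double len1_mm = arcLength(con_1, true) / fac1;          // perimeter scales linearly with the factor
   double len2_mm = arcLength(con_2, true) / fac2;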

2016-11-18 07:10:40 -0600 received badge  Supporter (source)
2016-11-18 07:10:25 -0600 commented answer Orientation of two contours

thank you very much for your great answer. I finally solved the problems with the Hu Moments. :-)

2016-11-18 07:06:55 -0600 commented answer return value matchShapes(contours)

Thank you very much I tried Method 2 and it worked fine for my problem :-)

2016-11-18 07:04:38 -0600 received badge  Scholar (source)
2016-11-16 08:50:59 -0600 commented question Orientation of two contours

So I get the right angle with:

double atheta_res = ( -atheta_dxf_outer + atheta_img_outer ) / 2;

But I have to add an offset of either 0, pi/2, pi or 3*pi/2. I tried to match the shapes with

matchShapes(...);

for each angle + offset (I thought I would get the smallest value if the contour has the right orientation). But the function returns the smallest value although it's not the contour with the right offset. So does anyone have an idea how to solve this?

2016-11-16 07:56:24 -0600 asked a question return value matchShapes(contours)

I know that the return value of the matchShapes() function is different depending on the orientation of the contour. But I noticed that a twisted contour gets a smaller return value than an untwisted one. How can this be?

2016-11-16 07:52:18 -0600 commented question Orientation of two contours

no they are not.

2016-11-16 04:27:30 -0600 asked a question Orientation of two contours

I try to calculate the orientation of 2 contours. In the end I want to rotate one contour so that it coincides with the other one. With my code I get a result, but it isn't very accurate. Also, I get the same orientation although the contour is rotated by about 90 degrees.

Here is my code:

   Moments moments_dxf;
   Moments moments_img;

   moments_dxf = moments( mcontours_dxf[ alargest_contour_index_dxf ], true );
   double theta_dxf = 0.5 * atan( ( 2 * moments_dxf.mu11 ) / ( moments_dxf.mu20 - moments_dxf.mu02 ) );

   moments_img = moments( mcontours_image[ alargest_contour_index_img ], true );
   double theta_img = 0.5 * atan( ( 2 * moments_img.mu11 ) / ( moments_img.mu20 - moments_img.mu02 ) );

Does anyone have an idea?
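
A note that may help (my suggestion, not from the thread): atan() only returns values in (-pi/2, pi/2), and for a shape rotated by 90 degrees the argument 2*mu11/(mu20 - mu02) evaluates to the same value, which is why both contours can report the same orientation. Using atan2 on the two terms keeps the quadrant information; an ambiguity of pi still remains, because second-order moments cannot distinguish a shape from its 180-degree rotation.

// Sketch: orientation of a contour's principal axis via atan2.
double getOrientation( const vector<Point>& tContour )
{
    Moments m = moments( tContour, true );
    // Result lies in (-pi/2, pi/2]; the remaining 180-degree ambiguity would have
    // to be resolved separately (e.g. with 3rd-order moments / skewness).
    return 0.5 * atan2( 2.0 * m.mu11, m.mu20 - m.mu02 );
}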

2016-11-14 09:01:24 -0600 received badge  Editor (source)
2016-11-14 09:00:55 -0600 asked a question return value of matchShape

Hey there,

I tried to match two contours. In the documentation of OpenCV there is an example which says that the smaller the value, the better the match. But in some positions I get smaller values although the two contours are twisted relative to each other, and bigger values when the contours are aligned.

How can this be? What can I do to avoid it?

2016-11-14 03:08:55 -0600 asked a question Calculate the distortion of two nearly identical Contours with “matchShapes()” and rotation of one contour

I have two files: 1. a .bmp file which shows a contour of an object made in AutoCAD, and 2. an image of the object taken by a camera.

From both files I extracted the contour of the object with findContours(), so I got 2 contours: 1. the contour from the .bmp file and 2. the contour of the object in the camera image.

My problem is that the object can be rotated in comparison to the contour from the .bmp file. My task is to get the angle between both contours.

I tried to show my problem with this picture: the lightning in the image is rotated in comparison to the contour from the .bmp file.

My first approach was a bounding rectangle around both contours. This worked well, but in some cases the bounding rectangle isn't unique.

In the second approach I rotate one contour by a specific angle delta and then get the value from matchShapes(). I think I should get the smallest value when I have rotated by exactly the angle between the contours, but this didn't work. Here is my code for the rotation:

 int index_contour = 0;
 float aret_contour_rot = 10;                 // best (smallest) matchShapes value found so far
 double abest_angle = 0;                      // angle that produced it
 // dummy contour of the same size, filled with the rotated points below
 vector<vector<Point> > mcontours_bmp_rotated = mcontours_bmp;
 Point2f aSWP;
 Point2f aCP;
 Point2f aCP_rot;
 Point2f atranspt;

 // get moments and center of gravity once; they do not depend on the rotation angle
 Moments amoments = moments( mcontours_bmp[ index_contour ], false );
 aSWP = Point2f( amoments.m10 / amoments.m00, amoments.m01 / amoments.m00 );

 for( double aDelAngle = 0; aDelAngle <= 2 * CV_PI; aDelAngle = aDelAngle + 0.01 )
 {
    for( int i = 0; i < (int) mcontours_bmp[ index_contour ].size(); i++ )
    {
       // get contour point
       aCP = Point2f( mcontours_bmp[ index_contour ][ i ].x, mcontours_bmp[ index_contour ][ i ].y );

       // vector between contour point and center of gravity
       atranspt.x = aCP.x - aSWP.x;
       atranspt.y = aCP.y - aSWP.y;

       // rotate the point around the center of gravity
       aCP_rot.x = cos( aDelAngle ) * atranspt.x - sin( aDelAngle ) * atranspt.y + aSWP.x;
       aCP_rot.y = sin( aDelAngle ) * atranspt.x + cos( aDelAngle ) * atranspt.y + aSWP.y;

       // copy the point into the rotated contour
       mcontours_bmp_rotated[ index_contour ][ i ].x = aCP_rot.x;
       mcontours_bmp_rotated[ index_contour ][ i ].y = aCP_rot.y;
    }

    float aret_contour_rot_act = matchShapes( mcontours_bmp_rotated[ index_contour ], mcontours_image[ index_contour ], 1, 3 );

    // keep the smallest return value from matchShapes and the angle that produced it
    if( aret_contour_rot_act < aret_contour_rot )
    {
       aret_contour_rot = aret_contour_rot_act;
       abest_angle = aDelAngle;
    }
    cout << "rot ret: " << aret_contour_rot_act << endl;
    cout << "rot deg: " << aDelAngle << endl;
 }
 cout << "rot ret ende: " << aret_contour_rot << endl;
 cout << "rot deg ende: " << abest_angle << endl;