multiple object detection 2d camera findHomography

      if (!imagess.empty())   // imagess is a cv::Mat frame from the camera
  {


      // Initialise the feature detector (ORB is the active one; the alternatives I tried are left commented out)
      //FastFeatureDetector detector (600);
     // MserFeatureDetector detector (1000);

      OrbFeatureDetector detector (5000);
       // SurfFeatureDetector detector(60);
      //SiftFeatureDetector detector(2000);

      //detect : first param: Image, second param: vector (output)

        //vector<KeyPoint> keypoints1,keypoints2;

        detector.detect(riscontro,keypoints1);
        detector.detect(imagess,keypoints2);

      // Initialise the descriptor extractor (ORB, to match the detector above)
        //FREAK.DescriptorExtractor extractor;
        OrbDescriptorExtractor extractor;
        //BriefDescriptorExtractor extractor;
        //SurfDescriptorExtractor extractor;
        //SiftFeatureDetector extractor;

      //Compute: Input:image, keypoints Output:descriptors


        extractor.compute(riscontro,keypoints1,descriptors1);
        extractor.compute(imagess,keypoints2,descriptors2);

        if (!descriptors1.empty() && !descriptors2.empty())   // logical AND, not bitwise &
        {

      // Initialise BFMatcher: for each descriptor in the first set, the matcher finds the closest descriptor in the second set by trying each one (brute force)
        //cv::BFMatcher matcher(cv::NORM_L2, true);
          //FlannBasedMatcher matcher(FLANN_DISTANCE_CHECK);
        BFMatcher matcher(NORM_HAMMING, true); // NORM_HAMMING for ORB with default WTA_K=2 (NORM_HAMMING2 is only for WTA_K=3 or 4)  //FlannBasedMatcher matcher; //BFMatcher matcher(NORM_L2);
        //std::vector<std::vector< DMatch > > matches;
        std::vector< DMatch > matches;
       // matcher.knnMatch(descriptors1,descriptors2, matches );
      //match: execute the matcher!
        matcher.match(descriptors1,descriptors2, matches);




       double max_dist = 0; double min_dist = 1e9;   // start high so the first real distance becomes the minimum

        //-- Quick calculation of max and min distances between keypoints
        for( size_t ui = 0; ui < matches.size(); ui++ )   // with crossCheck=true there can be fewer matches than descriptors1.rows
        { double dist = matches[ui].distance;
          if( dist < min_dist ) min_dist = dist;
          if( dist > max_dist ) max_dist = dist;
        }


         std::vector< DMatch > good_matches;
        /* good_matches.reserve(matches.size());


         for (size_t i = 0; i < matches.size(); ++i)
         {
             if (matches[i].size() < 2)
                         continue;

             const DMatch &m1 = matches[i][0];
             const DMatch &m2 = matches[i][1];

             if(m1.distance <= nndrRatio * m2.distance)
             good_matches.push_back(m1);
         }*/


        //std::vector<vector<DMatch> > good_matches;
       //***** std::vector< DMatch > good_matches;
      /*  double RatioT = 0.75;
       //-- ratio Test
       for(int ai=0; ai<matches.size(); ai++)
       {
       if((matches[ai].size()==1)||(abs(matches[ai][0].distance/matches[ai][1].distance) < RatioT))
       {
           good_matches.push_back(matches[ai]);
        }
       }*/


       for( size_t ai = 0; ai < matches.size(); ai++ )
        { if( matches[ai].distance  < 3*min_dist ) // <= max(2*min_dist, 0.02) )
            { good_matches.push_back(matches[ai]); }
        }


      //Draw the matches with drawMatches

        //findHomography(riscontro, imagess, CV_RANSAC);
       //for(int iw=0;iw<good_matches.size();iw++)
           drawMatches(riscontro,keypoints1,imagess,keypoints2,good_matches,target1, Scalar::all(-1), Scalar::all(-1),
                    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
        //Size size(640,480);
        //resize(target1,target,size);//resize image

        //-- Localize the object
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;

        for( unsigned int ki = 0; ki < good_matches.size(); ki++ )
        {
          //-- Get the keypoints from the good matches
          obj.push_back( keypoints1[ good_matches[ki].queryIdx ].pt );
          scene.push_back( keypoints2[ good_matches[ki].trainIdx ].pt );
          kglobalpoint.push_back( keypoints2[ good_matches[ki].trainIdx ].pt );
        }

        if (obj.size() >= 4){

        Mat H = findHomography( obj, scene, CV_LMEDS, 5.0 );
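        // (as far as I understand, the 5.0 reprojection threshold is only used by CV_RANSAC; CV_LMEDS ignores it)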


        //-- Get the corners from the image_1 ( the object to be "detected" )
        std::vector<Point2f> obj_corners(4);
        obj_corners[0] = Point2f(0,0); obj_corners[1] = Point2f( (float)riscontro.cols, 0 );
        obj_corners[2] = Point2f( (float)riscontro.cols, (float)riscontro.rows ); obj_corners[3] = Point2f( 0, (float)riscontro.rows );
        std::vector<Point2f> scene_corners(4);

        perspectiveTransform( obj_corners, scene_corners, H);



        //-- Draw lines between the corners (the mapped object in the scene - image_2 )
        Point2f offset( (float)riscontro.cols, 0);
        line( target1, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
        line( target1, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
        line( target1, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
        line( target1, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );

        }

        //-- Show detected matches
        Size size(640,480);
        resize(target1,target,size);   // resize image for display

        } // end if (!descriptors1.empty() && !descriptors2.empty())
  } // end if (!imagess.empty())

Here is my test code. I work in Qt (C++), running this in a thread. The result for a single object is barely acceptable, but for multiple objects of the same type it is very bad: the keypoints are drawn correctly, but the square around the object does not appear and I only see flashes of light green. Where are my (probably multiple) mistakes?
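
For the multiple-objects case I was thinking of something along the lines of the sketch below (untested, just to show the idea): fit a homography with CV_RANSAC and an inlier mask, draw the box for that instance, remove the inlier matches, and repeat until too few points are left. It reuses good_matches, keypoints1/keypoints2, riscontro and target1 from the code above; the minimum of 8 inliers is an arbitrary threshold I picked. Is this the right direction?

      // sketch only: try to find several instances of the same object by
      // fitting a homography, removing its inliers, and fitting again
      std::vector<Point2f> objPts, scenePts;
      for (size_t i = 0; i < good_matches.size(); i++)
      {
          objPts.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
          scenePts.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
      }

      Point2f offset( (float)riscontro.cols, 0 );
      while (objPts.size() >= 4)
      {
          std::vector<uchar> inlierMask;
          Mat H = findHomography( objPts, scenePts, CV_RANSAC, 5.0, inlierMask );
          if (H.empty()) break;

          int inliers = countNonZero(inlierMask);
          if (inliers < 8) break;   // too little support, assume no more instances

          // project the template corners with this H and draw the box
          std::vector<Point2f> obj_corners(4), scene_corners(4);
          obj_corners[0] = Point2f(0, 0);
          obj_corners[1] = Point2f( (float)riscontro.cols, 0 );
          obj_corners[2] = Point2f( (float)riscontro.cols, (float)riscontro.rows );
          obj_corners[3] = Point2f( 0, (float)riscontro.rows );
          perspectiveTransform( obj_corners, scene_corners, H );
          for (int c = 0; c < 4; c++)
              line( target1, scene_corners[c] + offset,
                    scene_corners[(c + 1) % 4] + offset, Scalar(0, 255, 0), 4 );

          // drop the inliers of this instance and try to fit the next one
          std::vector<Point2f> restObj, restScene;
          for (size_t i = 0; i < inlierMask.size(); i++)
              if (!inlierMask[i])
              {
                  restObj.push_back(objPts[i]);
                  restScene.push_back(scenePts[i]);
              }
          objPts.swap(restObj);
          scenePts.swap(restScene);
      }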
