
januka's profile - activity

2013-05-17 08:41:16 -0600 asked a question Building a 3D model (build3dmodel.cpp)

This is regarding the sample code shipped with OpenCV, 'build3dmodel.cpp': https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/cpp/build3dmodel.cpp?rev=3220 . I am hoping to do a project on 3D reconstruction using OpenCV, but before that I would love to test a sample project like this. I have configured OpenCV with Visual Studio 2010. I am new to OpenCV, so please tell me how to give inputs (I have a test image set) to this program in order to test it (please specify the lines). This would be a GREAT help!
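While waiting for an answer on the exact arguments, here is a generic, hypothetical sketch of how console OpenCV samples typically receive a test image set: the image file names are passed on the command line, one per argument. This is not build3dmodel.cpp's actual argument format, only an illustration; check the sample's own help/usage output for the flags it really expects.

#include <iostream>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"

int main(int argc, char** argv)
{
    // Hypothetical illustration only -- not build3dmodel.cpp's real interface.
    // Each command-line argument is treated as one test image path.
    std::vector<cv::Mat> images;
    for (int i = 1; i < argc; i++) {
        cv::Mat img = cv::imread(argv[i], 1);   // load in colour
        if (img.empty()) {
            std::cout << "Could not read " << argv[i] << std::endl;
            continue;
        }
        images.push_back(img);
    }
    std::cout << "Loaded " << images.size() << " test images" << std::endl;
    return 0;
}

In Visual Studio 2010 the arguments (e.g. 0000.jpg 0001.jpg) are set under Project Properties -> Configuration Properties -> Debugging -> Command Arguments.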

2013-05-17 06:48:55 -0600 asked a question Vector subscript out of range, program crash
  • This program compiles with no errors, but there is one warning: warning C4018: '<' : signed/unsigned mismatch.

  • Also, when I run the program it crashes saying "Vector subscript out of range".

I really need your help to track down this error :(

#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2\imgproc\imgproc.hpp"
#include "opencv2\video\video.hpp"
#include <iostream>
#include <string>
#include <vector>
#include <set>
using namespace cv;
using namespace std;
int main()
{
    Mat img1 = imread("0000.jpg", 1);
    Mat img2 = imread("0001.jpg", 1);
    vector<Point2f> j_pts;
    vector<DMatch>* matches;
    vector<Point2f> to_find;
    // Detect keypoints in the left and right images
    FastFeatureDetector detector(50);
    vector<KeyPoint> left_keypoints,right_keypoints;
    detector.detect(img1, left_keypoints);
    detector.detect(img2, right_keypoints);
    vector<Point2f>left_points;
    //KeyPointsToPoints(left_keypoints,left_points);
    KeyPoint::convert(left_keypoints,left_points);
    vector<Point2f>right_points(left_points.size());
    // making sure images are grayscale

    Mat prevgray,gray;
    if (img1.channels() == 3) {
        cvtColor(img1,prevgray,CV_RGB2GRAY);
        cvtColor(img2,gray,CV_RGB2GRAY);
    } else {
        prevgray = img1;
        gray = img2;
    }
    // Calculate the optical flow field:
    // how each left_point moved across the 2 images
    vector<uchar>vstatus; vector<float>verror;
    calcOpticalFlowPyrLK(prevgray, gray, left_points, right_points,vstatus, verror);
    // First, filter out the points with high error
    vector<Point2f>right_points_to_find;
    vector<int>right_points_to_find_back_index;
    for (unsigned int i=0; i<vstatus.size(); i++) {
        if (vstatus[i] &&verror[i] < 12.0) {
            // Keep the original index of the point in the
            // optical flow array, for future use
            right_points_to_find_back_index.push_back(i);
            // Keep the feature point itself
            right_points_to_find.push_back(j_pts[i]);
        } else {
            vstatus[i] = 0; // a bad flow
        }
    }
    // for each right_point see which detected feature it belongs to
    Mat right_points_to_find_flat = Mat(right_points_to_find).reshape(1,to_find.size()); //flatten array
    vector<Point2f>right_features; // detected features
    KeyPoint::convert(right_keypoints,right_features);
    Mat right_features_flat = Mat(right_features).reshape(1,right_features.size());
    // Look around each OF point in the right image
    // for any features that were detected in its area
    // and make a match.
    BFMatcher matcher(CV_L2);
    vector<vector<DMatch>>nearest_neighbors;
    matcher.radiusMatch(right_points_to_find_flat,right_features_flat,nearest_neighbors,2.0f);
    // Check that the found neighbors are unique (throw away neighbors
    // that are too close together, as they may be confusing)

    std::set<int>found_in_right_points; // for duplicate prevention
    for(int i=0;i<nearest_neighbors.size();i++) {
        DMatch _m;
        if(nearest_neighbors[i].size()==1) {
            _m = nearest_neighbors[i][0]; // only one neighbor
        } else if(nearest_neighbors[i].size()>1) {
            // 2 neighbors – check how close they are
            double ratio = nearest_neighbors[i][0].distance /
                nearest_neighbors[i][1].distance;
            if(ratio < 0.7) { // not too close
                // take the closest (first) one
                _m = nearest_neighbors[i][0];
            } else { // too close – we cannot tell which is better
                continue; // did not pass ratio test – throw away
            }
        } else {
            continue; // no neighbors... :(
        }
        // prevent duplicates
        if (found_in_right_points.find(_m.trainIdx) == found_in_right_points.
                end()) {
            // The found neighbor was not yet used:
            // We should match it with the original indexing
            // of the left point
            _m.queryIdx = right_points_to_find_back_index[_m.queryIdx];
            matches->push_back(_m); // add this match
            found_in_right_points.insert(_m.trainIdx);
        }
    }
    cout<<"pruned "<< matches->size() <<" / "<<nearest_neighbors.size()
        <<" matches"<<endl;
    return 0;
}
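For reference, a minimal sketch of the changes that most likely address both the crash and the warning about matches, assuming the intent is to keep the points that calcOpticalFlowPyrLK writes into right_points. This is a reading of the code above, not a verified fix, and the helper filterByFlowStatus is introduced here only for illustration.

// Sketch of the likely fixes (a reading of the code above, not a verified answer):
//  1. `matches` is declared as vector<DMatch>* and never initialized, yet it is
//     dereferenced via matches->push_back(...) -> use a plain vector<DMatch>.
//  2. `j_pts` is never filled, so j_pts[i] is what triggers "Vector subscript out
//     of range" -> index the tracked points produced by calcOpticalFlowPyrLK,
//     i.e. right_points[i].
//  3. `to_find` is also empty, so reshape(1, to_find.size()) has the wrong row
//     count -> use right_points_to_find.size().
//  4. It is also worth checking that imread() actually loaded 0000.jpg and
//     0001.jpg (an empty Mat yields no keypoints and no flow).
#include <iostream>
#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"

using namespace cv;
using namespace std;

// Hypothetical helper isolating the filtering step from the question.
static void filterByFlowStatus(const vector<Point2f>& right_points,
                               vector<uchar>& vstatus,
                               const vector<float>& verror,
                               vector<Point2f>& right_points_to_find,
                               vector<int>& right_points_to_find_back_index)
{
    for (size_t i = 0; i < vstatus.size(); i++) {        // size_t also avoids C4018
        if (vstatus[i] && verror[i] < 12.0f) {
            right_points_to_find_back_index.push_back((int)i);
            right_points_to_find.push_back(right_points[i]); // not j_pts[i]
        } else {
            vstatus[i] = 0; // a bad flow
        }
    }
}

int main()
{
    vector<DMatch> matches;   // a value, not an uninitialized pointer
    // ... detect FAST keypoints and run calcOpticalFlowPyrLK as in the code
    //     above, then:
    // filterByFlowStatus(right_points, vstatus, verror,
    //                    right_points_to_find, right_points_to_find_back_index);
    // Mat right_points_to_find_flat =
    //     Mat(right_points_to_find).reshape(1, right_points_to_find.size());
    // ... radiusMatch and the ratio test as above, pushing results with
    //     matches.push_back(_m) instead of matches->push_back(_m).
    cout << "matches so far: " << matches.size() << endl;
    return 0;
}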
2013-05-17 06:29:01 -0600 commented answer OpenCV Optical Flow Point matching, tiny error

Thank you, that removed one warning. "warning C4018: '<' : signed/unsigned mismatch" is still there :( When I run the program it crashes saying 'Vector subscript out of range'.
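The remaining C4018 warning most likely comes from the for(int i=0;i<nearest_neighbors.size();i++) loop, where a signed int index is compared against the unsigned value returned by size(). A minimal sketch of the usual fix follows; the vector v is only a placeholder.

#include <iostream>
#include <vector>

int main()
{
    std::vector<int> v(5, 0);   // placeholder data
    // size() returns an unsigned size_t; using an unsigned loop index
    // keeps the comparison consistent and silences warning C4018.
    for (std::size_t i = 0; i < v.size(); i++) {
        std::cout << v[i] << std::endl;
    }
    return 0;
}

The crash itself is a separate issue; see the note after the code above about j_pts and the uninitialized matches pointer.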

2013-05-17 06:27:36 -0600 received badge  Scholar (source)
2013-05-17 06:27:35 -0600 received badge  Supporter (source)
2013-05-17 04:08:29 -0600 asked a question OpenCV Optical Flow Point matching, tiny error

I am trying to simulate optical flow using 2 images. This program gives no compilation errors, but it crashes when I run it. However, I get 2 warnings. I think the error could be in vector variable initialization or conversion:

warning C4018: '<' : signed/unsigned mismatch
warning C4700: uninitialized local variable 'matches' used

#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2\imgproc\imgproc.hpp"
#include "opencv2\video\video.hpp"
#include <iostream>
#include <string>
#include <vector>
#include <set>
using namespace cv;
using namespace std;
int main()
{
    Mat img1 = imread("0000.jpg", 1);
    Mat img2 = imread("0001.jpg", 1);
    vector<Point2f> j_pts;
    vector<DMatch>* matches;
    vector<Point2f> to_find;
    // Detect keypoints in the left and right images
    FastFeatureDetector detector(50);
    vector<KeyPoint> left_keypoints,right_keypoints;
    detector.detect(img1, left_keypoints);
    detector.detect(img2, right_keypoints);
    vector<Point2f>left_points;
    //KeyPointsToPoints(left_keypoints,left_points);
    KeyPoint::convert(left_keypoints,left_points);
    vector<Point2f>right_points(left_points.size());
    // making sure images are grayscale

    Mat prevgray,gray;
    if (img1.channels() == 3) {
        cvtColor(img1,prevgray,CV_RGB2GRAY);
        cvtColor(img2,gray,CV_RGB2GRAY);
    } else {
        prevgray = img1;
        gray = img2;
    }
    // Calculate the optical flow field:
    // how each left_point moved across the 2 images
    vector<uchar>vstatus; vector<float>verror;
    calcOpticalFlowPyrLK(prevgray, gray, left_points, right_points,vstatus, verror);
    // First, filter out the points with high error
    vector<Point2f>right_points_to_find;
    vector<int>right_points_to_find_back_index;
    for (unsigned int i=0; i<vstatus.size(); i++) {
        if (vstatus[i] && verror[i] < 12.0) {
            // Keep the original index of the point in the
            // optical flow array, for future use
            right_points_to_find_back_index.push_back(i);
            // Keep the feature point itself
            right_points_to_find.push_back(j_pts[i]);
        } else {
            vstatus[i] = 0; // a bad flow
        }
    }
    // for each right_point see which detected feature it belongs to
    Mat right_points_to_find_flat = Mat(right_points_to_find).reshape(1,to_find.size()); //flatten array
    vector<Point2f>right_features; // detected features
    KeyPoint::convert(right_keypoints,right_features);
    Mat right_features_flat = Mat(right_features).reshape(1,right_features.size());
    // Look around each OF point in the right image
    // for any features that were detected in its area
    // and make a match.
    BFMatcher matcher(CV_L2);
    vector<vector<DMatch>>nearest_neighbors;
    matcher.radiusMatch(right_points_to_find_flat,right_features_flat,nearest_neighbors,2.0f);
    // Check that the found neighbors are unique (throw away neighbors
    // that are too close together, as they may be confusing)

    std::set<int>found_in_right_points; // for duplicate prevention
    for(int i=0;i<nearest_neighbors.size();i++) {
        DMatch _m;
        if(nearest_neighbors[i].size()==1) {
            _m = nearest_neighbors[i][0]; // only one neighbor
        } else if(nearest_neighbors[i].size()>1) {
            // 2 neighbors – check how close they are
            double ratio = nearest_neighbors[i][0].distance /
                nearest_neighbors[i][1].distance;
            if(ratio < 0.7) { // not too close
                // take the closest (first) one
                _m = nearest_neighbors[i][0];
            } else { // too close – we cannot tell which is better
                continue; // did not pass ratio test – throw away
            }
        } else {
            continue; // no neighbors... :(
        }
        // prevent duplicates
        if (found_in_right_points.find(_m.trainIdx) == found_in_right_points.end()) {
            // The found neighbor was not yet used:
            // We should match it with the original indexing
            // of the left point
            _m.queryIdx = right_points_to_find_back_index[_m.queryIdx];
            matches->push_back(_m); // add this match
            found_in_right_points.insert(_m.trainIdx);
        }
    }
cout ...