
NaNs in stitching_detailed example due to camera estimation?

I've been trying the stitching_detailed example. It works for a small set of images, but for larger sets (e.g. more than 15 images) it doesn't: the camera estimation matrices in the log messages come back full of NaNs, which of course means no panorama. I'm quite new to image processing, so I'm struggling to understand what is going on, but I think it has to do with estimating the focal length from the camera matrix.
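In the meantime I've been guarding against the bad values before the later pipeline stages run. This is only a minimal sketch of that check, assuming the cameras vector has just been filled by HomographyBasedEstimator::estimate(); the helper name is mine, not OpenCV's:

    // Sketch: reject the estimation result if any focal came back non-finite.
    // A NaN/inf focal here poisons every later stage of the pipeline.
    #include <cmath>
    #include <vector>
    #include <opencv2/stitching/detail/camera.hpp>

    bool camerasAreFinite(const std::vector<cv::detail::CameraParams> &cameras)
    {
        for (size_t i = 0; i < cameras.size(); ++i)
            if (!std::isfinite(cameras[i].focal))
                return false;
        return true;
    }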

I've looked for help in the docs but only found the OpenCV API pages, so I dug into the stitching module source and looked at the offending part of motion_estimators.cpp (code at the bottom).

Can anyone help me understand what is going on in that bit of code (below)? In particular, why is there a disabled block (#if 0 to #endif), and what would enabling it give me? Also, would this code be expected to be numerically unstable, probably most likely in the call chain that reaches focalsFromHomography()?

Finally, does anybody know of any docs explaining focalsFromHomography()? It doesn't look like a factorisation of the camera matrix to me (that may be what happens in a different function, which mentions a Cholesky decomposition). However, I'm very new to image processing, so I don't really know what I'm talking about here.
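For what it's worth, here is my current (possibly wrong) understanding of the idea, written out as a sketch rather than as the actual OpenCV source: under a purely rotating camera, H ≈ K1 * R * K0^-1 with K = diag(f, f, 1), so the first two columns of K1^-1 * H * K0 must be orthogonal and of equal norm, which gives two closed-form candidates for f1^2 straight from the entries of H. The names below are mine:

    // Sketch: one focal estimate (f1, the "to" camera) from a 3x3 CV_64F
    // homography H, assuming rotation-only motion and K = diag(f, f, 1).
    #include <opencv2/core/core.hpp>
    #include <algorithm>
    #include <cmath>

    bool focalFromHomographySketch(const cv::Mat &H, double &f1)
    {
        const double *h = H.ptr<double>();  // row-major: h[0]=H(0,0), h[1]=H(0,1), ...
        // Column orthogonality:  f1^2 = -(h0*h1 + h3*h4) / (h6*h7)
        // Equal column norms:    f1^2 = (h0^2 + h3^2 - h1^2 - h4^2) / (h7^2 - h6^2)
        double d1 = h[6] * h[7];
        double d2 = (h[7] - h[6]) * (h[7] + h[6]);
        double v1 = -(h[0] * h[1] + h[3] * h[4]) / d1;
        double v2 = (h[0] * h[0] + h[3] * h[3] - h[1] * h[1] - h[4] * h[4]) / d2;
        if (v1 < v2) std::swap(v1, v2);
        // Prefer the candidate with the better-conditioned (larger) denominator.
        // Note the divisions blow up when h6, h7 are ~0 (near-identity rotation),
        // which is exactly the kind of instability I'm asking about.
        if (v1 > 0 && v2 > 0)
            f1 = std::sqrt(std::abs(d1) > std::abs(d2) ? v1 : v2);
        else if (v1 > 0)
            f1 = std::sqrt(v1);
        else
            return false;  // no positive candidate: no usable estimate
        return true;
    }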

Many thanks for any tips on this.

    void HomographyBasedEstimator::estimate(const vector<ImageFeatures> &features,
                                            const vector<MatchesInfo> &pairwise_matches,
                                            vector<CameraParams> &cameras)
    {
        LOGLN("Estimating rotations...");
    #if ENABLE_LOG
        int64 t = getTickCount();
    #endif

        const int num_images = static_cast<int>(features.size());

    #if 0
        // Robustly estimate focal length from rotating cameras
        vector<Mat> Hs;
        for (int iter = 0; iter < 100; ++iter)
        {
            int len = 2 + rand() % (pairwise_matches.size() - 1);
            vector<int> subset;
            selectRandomSubset(len, pairwise_matches.size(), subset);
            Hs.clear();
            for (size_t i = 0; i < subset.size(); ++i)
                if (!pairwise_matches[subset[i]].H.empty())
                    Hs.push_back(pairwise_matches[subset[i]].H);
            Mat_<double> K;
            if (Hs.size() >= 2)
            {
                if (calibrateRotatingCamera(Hs, K))
                    cin.get();
            }
        }
    #endif

        if (!is_focals_estimated_)
        {
            // Estimate focal length and set it for all cameras
            vector<double> focals;
            estimateFocal(features, pairwise_matches, focals);
            cameras.assign(num_images, CameraParams());
            for (int i = 0; i < num_images; ++i)
                cameras[i].focal = focals[i];
        }
        else
        {
            for (int i = 0; i < num_images; ++i)
            {
                cameras[i].ppx -= 0.5 * features[i].img_size.width;
                cameras[i].ppy -= 0.5 * features[i].img_size.height;
            }
        }

        // Restore global motion
        Graph span_tree;
        vector<int> span_tree_centers;
        findMaxSpanningTree(num_images, pairwise_matches, span_tree, span_tree_centers);
        span_tree.walkBreadthFirst(span_tree_centers[0], CalcRotation(num_images, pairwise_matches, cameras));

        // As calculations were performed under assumption that p.p. is in image center
        for (int i = 0; i < num_images; ++i)
        {
            cameras[i].ppx += 0.5 * features[i].img_size.width;
            cameras[i].ppy += 0.5 * features[i].img_size.height;
        }

        LOGLN("Estimating rotations, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    }
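In case it helps anyone reproduce this, I've also tried feeding my pairwise homographies straight into calibrateRotatingCamera() (the function the disabled block calls) to see whether global calibration succeeds at all. A minimal sketch, mirroring the #if 0 block above minus the random subsets and the cin.get() debug pause; the wrapper name is mine:

    // Sketch: run global rotating-camera calibration on all available
    // pairwise homographies and report the result.
    #include <opencv2/stitching/detail/autocalib.hpp>
    #include <opencv2/stitching/detail/matchers.hpp>
    #include <iostream>
    #include <vector>

    void tryGlobalCalibration(const std::vector<cv::detail::MatchesInfo> &pairwise_matches)
    {
        std::vector<cv::Mat> Hs;
        for (size_t i = 0; i < pairwise_matches.size(); ++i)
            if (!pairwise_matches[i].H.empty())
                Hs.push_back(pairwise_matches[i].H);

        cv::Mat K;
        if (Hs.size() >= 2 && cv::detail::calibrateRotatingCamera(Hs, K))
            std::cout << "Estimated camera matrix K:\n" << K << std::endl;
        else
            std::cout << "Global calibration failed" << std::endl;
    }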
