So I'm posting the most important parts of my code (not all of it, because the program is large, but I think it will be enough).

[Images: frames from the left camera and from the right camera]

Intrinsic and extrinsic parameters for the left camera:

cameraMat1 = 
    5.327949e+02      0        3.089701e+02 
         0       5.333043e+02  2.523206e+02 
         0            0            1 

distCoeffs1 = 
    8.571497e-02   
    -2.059629e-01 
    5.049960e-03 
    -3.563517e-03

rotation1 = 
     9.986447e-01    5.134855e-02     -8.494029e-03 
    -5.165607e-02    9.978162e-01     -4.116318e-02 
     6.361810e-03    4.154616e-02      9.991163e-01

projMat1 = 
     4.204745e+02        0         3.185035e+02   0 
           0        4.204745e+02   2.475893e+02   0 
           0             0              1         0

Intrinsic and extrinsic parameters for the right camera:

cameraMat2 = 
    5.339174e+02      0        3.001284e+02 
         0       5.343352e+02  2.333189e+02 
         0            0            1 

distCoeffs2 = 
    7.593528e-02   
    -1.829142e-01 
    7.050366e-03 
    -4.569196e-03

rotation2 = 
     9.976312e-01    5.243198e-02     -4.452930e-02 
    -5.054383e-02    9.978163e-01     4.251988e-02 
     4.666146e-02   -4.016848e-02     9.981028e-01

projMat2 = 
     4.204745e+02        0         3.185035e+02   -2.708721e+03 
           0        4.204745e+02   2.475893e+02      0 
           0             0              1            0
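
For context, rotation1/rotation2 and projMat1/projMat2 have the form of the R1/R2 and P1/P2 outputs of cv::stereoRectify (both projection matrices share the same focal length and principal point, and projMat2 carries the baseline term in its fourth column). Below is a minimal sketch, not the exact code from my program, of how such matrices can be computed; it assumes R and T are the inter-camera rotation and translation from an earlier cv::stereoCalibrate call.

#include <opencv2/opencv.hpp>

// Sketch only: compute rectification transforms (R1, R2) and projection matrices (P1, P2)
// for a calibrated stereo pair. These correspond in shape to rotation1/2 and projMat1/2 above.
void computeRectification(const cv::Mat& cameraMat1, const cv::Mat& distCoeffs1,
                          const cv::Mat& cameraMat2, const cv::Mat& distCoeffs2,
                          const cv::Mat& R, const cv::Mat& T, cv::Size imageSize,
                          cv::Mat& R1, cv::Mat& R2, cv::Mat& P1, cv::Mat& P2)
{
  cv::Mat Q; // 4x4 disparity-to-depth mapping matrix (not used by the code below)
  cv::stereoRectify(cameraMat1, distCoeffs1, cameraMat2, distCoeffs2,
                    imageSize, R, T, R1, R2, P1, P2, Q);
}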

1. Here is the startOffline() method that starts the whole calculation:

void TrackSystem::startOffline()
{
  try
  {
    saveData(true); //it only sets up boolean variables to know what type of data should be saved

    for(int i = 0; i < m_imageList1.size(); i++)
    {
      cv::Mat* tempMat1 = &m_imageList1[i];
      cv::Mat* tempMat2 = &m_imageList2[i];

      IplImage* image1 = cvCloneImage(&(IplImage)(*tempMat1));
      IplImage* image2 = cvCloneImage(&(IplImage)(*tempMat2));

      stereovideocallback(image1, image2);  //see 2.

      //release the clones so each iteration does not leak memory
      cvReleaseImage(&image1);
      cvReleaseImage(&image2);
    }

    calcTriangulation2TEST(); //see first post
    saveData(false);
  }
  catch(const std::exception &e)
  {
    qDebug()<<"Exception: " + QString(e.what());
  }
  catch(...){}
}

2. Here is the stereovideocallback() method that detects the markers and saves the data to m_history.

void TrackSystem::stereovideocallback(IplImage* image1, IplImage* image2)
{
  bool flip_image1 = (image1->origin?true:false);
  if(flip_image1)
  {
    cvFlip(image1);
    image1->origin = !image1->origin;
  }

  bool flip_image2 = (image2->origin?true:false);
  if(flip_image2)
  {
    cvFlip(image2);
    image2->origin = !image2->origin;
  }

  // Loading camera calibration
  if(m_init)
  {
    bool calibLoaded1 = loadCalibration(image1, true); //see 3.
    bool calibLoaded2 = loadCalibration(image2, false);
    if(!calibLoaded1 || !calibLoaded2)
      emit calibLoadFailure();
  }

  // Finding and processing markers 
  static MarkerDetector<MarkerData> marker_detector1; //Alvar library
  static MarkerDetector<MarkerData> marker_detector2;

  marker_detector1.SetMarkerSize(m_markerSize);
  marker_detector2.SetMarkerSize(m_markerSize);

  QString timeNow;
  if(m_isDataSaved)
  {
    timeNow = m_timeObj->getTimeString();
  }

  bool found1 = processMarkers(&marker_detector1, image1, 0); // see 4.
  bool found2 = processMarkers(&marker_detector2, image2, 1);

  if(found1 && found2)
  {
    calcStereoMarkers(image1, 0, &marker_detector1, timeNow); // see 5.
    calcStereoMarkers(image2, 1, &marker_detector2, timeNow);
  }

  if(m_init)
    m_init = false;

  if(flip_image1)
  {
    cvFlip(image1);
    image1->origin = !image1->origin;
  }

  if(flip_image2)
  {
    cvFlip(image2);
    image2->origin = !image2->origin;
  }
}

3. Here are the loadCalibration() and readCameraParameters() functions:

bool TrackSystem::loadCalibration(IplImage *image, bool firstCamera)
{
  bool isFileOpened = readCameraParameters(firstCamera);

  QString tempFile;
  alvar::Camera* cam;

  if(firstCamera)
  {
    tempFile = m_calibrationFile1;
    cam = &m_cam1;
  }
  else
  {
    tempFile = m_calibrationFile2;
    cam = &m_cam2;
  }

  bool calibSetted = cam->SetCalib(tempFile.toStdString().c_str(), image->width, image->height);

  if(!calibSetted)
    cam->SetRes(image->width, image->height);

  return calibSetted ;
}


bool TrackSystem::readCameraParameters(bool firstCam)
{
  QString tempFile;
  Mat* tempMat;
  Mat* tempCoeffs;
  Mat* tempProj;
  Mat* tempTransl;
  Mat* tempRot;

  if(firstCam)
  {
    tempFile = m_calibrationFile1;
    tempMat = &m_cameraMatrix1;
    tempCoeffs = &m_distortionCoeffs1;
    tempProj = &m_projectionMat1;
    tempRot = &m_rotationMat1;
    tempTransl = &m_translationMat1;
  }
  else
  {
    tempFile = m_calibrationFile2;
    tempMat = &m_cameraMatrix2;
    tempCoeffs = &m_distortionCoeffs2;
    tempProj = &m_projectionMat2;
    tempRot = &m_rotationMat2;
    tempTransl = &m_translationMat2;
  }

  cv::FileStorage cameraFile(tempFile.toStdString(), FileStorage::READ );
  bool fsIsOpened = cameraFile.isOpened();

  if(fsIsOpened)    
  {
    cameraFile["intrinsic_matrix"] >> *tempMat;
    cameraFile["distortion"] >> *tempCoeffs;
    if(m_stereoProcess)
    {
      cameraFile["projection_matrix"] >> *tempProj;
      cameraFile["translation"]>> *tempTransl;
      cameraFile["rotation"]>>*tempRot;
    }
  }
  return fsIsOpened;
}
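
For reference, this is the calibration file layout that readCameraParameters() reads. The key names are exactly the ones used above; the assumption on my side is that the file is an OpenCV FileStorage YAML file (an XML FileStorage file with the same keys behaves identically). The numbers shown are the left-camera values; the "translation" entry is stored the same way but its values are not shown here.

%YAML:1.0
intrinsic_matrix: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 5.327949e+02, 0., 3.089701e+02,
           0., 5.333043e+02, 2.523206e+02,
           0., 0., 1. ]
distortion: !!opencv-matrix
   rows: 4
   cols: 1
   dt: d
   data: [ 8.571497e-02, -2.059629e-01, 5.049960e-03, -3.563517e-03 ]
rotation: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ 9.986447e-01, 5.134855e-02, -8.494029e-03,
           -5.165607e-02, 9.978162e-01, -4.116318e-02,
           6.361810e-03, 4.154616e-02, 9.991163e-01 ]
projection_matrix: !!opencv-matrix
   rows: 3
   cols: 4
   dt: d
   data: [ 4.204745e+02, 0., 3.185035e+02, 0.,
           0., 4.204745e+02, 2.475893e+02, 0.,
           0., 0., 1., 0. ]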

4. Here is the processMarkers() function:

bool TrackSystem::processMarkers(MarkerDetector<MarkerData>* detector, IplImage* image, int cameraIndex)
{
  alvar::Camera* tempCam = &m_cam1;
  if(m_stereoProcess && cameraIndex == 1)
    tempCam = &m_cam2;

  detector->Detect(image, tempCam, true, false);

  return detector->markers->size() > 0;
}

5. Here is the calcStereoMarkers() function:

void TrackSystem::calcStereoMarkers(IplImage* image, int cameraIndex, MarkerDetector<MarkerData>* marker_detector, QString time)
{
  if(marker_detector->markers->size() > 0)
  {
    double x,y;
    vector<Point2f> orgPoints;
    CvPoint3D64f point3D;
    bool isCalcCorrect = false;

    x = (*(marker_detector->markers))[0].marker_corners_img[0].x;
    y = (*(marker_detector->markers))[0].marker_corners_img[0].y;
    orgPoints.push_back(Point2f(x, y));

    Pose markerPose = (*(marker_detector->markers))[0].pose;
    double ext_translate[3];
    CvMat ext_translate_mat = cvMat(3, 1, CV_64F, ext_translate);
    markerPose.GetTranslation(&ext_translate_mat);
    point3D.x = ext_translate[0];
    point3D.y = ext_translate[1];
    point3D.z = ext_translate[2];

    vector<Point2f> undistPoints;

    Mat* tempMat = &m_cameraMatrix1;
    Mat* tempCoeffs = &m_distortionCoeffs1;
    Mat* tempProj = &m_projectionMat1;
    Mat* tempRot = &m_rotationMat1;
    if(m_stereoProcess && cameraIndex == 1)
    {
      tempMat = &m_cameraMatrix2;
      tempCoeffs = &m_distortionCoeffs2;
      tempProj = &m_projectionMat2;
      tempRot = &m_rotationMat2;
    }

    undistortPoints(orgPoints, undistPoints, *tempMat, *tempCoeffs, *tempRot, *tempProj);

    if(m_isDataSaved)
      m_history.addMarkerCornerData3(cameraIndex, time, undistPoints[0], point3D); 
  }
}
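
The triangulation itself happens in calcTriangulation2TEST() (see the first post). For completeness, here is a minimal sketch of how the undistorted corner points collected by calcStereoMarkers() for camera 0 and camera 1 could be triangulated with cv::triangulatePoints and projMat1/projMat2; the function and its arguments are only an illustration, not my real m_history interface.

#include <opencv2/opencv.hpp>
#include <vector>

// Sketch only: triangulate matched undistorted points from the left (camera 0) and
// right (camera 1) images using the two rectified projection matrices.
std::vector<cv::Point3f> triangulateCorners(const std::vector<cv::Point2f>& pointsLeft,
                                            const std::vector<cv::Point2f>& pointsRight,
                                            const cv::Mat& projMat1, const cv::Mat& projMat2)
{
  // triangulatePoints expects float input, so convert the projection matrices first
  cv::Mat P1f, P2f;
  projMat1.convertTo(P1f, CV_32F);
  projMat2.convertTo(P2f, CV_32F);

  cv::Mat points4D;
  cv::triangulatePoints(P1f, P2f, pointsLeft, pointsRight, points4D);

  // points4D is a 4xN CV_32F matrix in homogeneous coordinates; divide by w to get 3D points
  std::vector<cv::Point3f> points3D;
  for(int i = 0; i < points4D.cols; i++)
  {
    float w = points4D.at<float>(3, i);
    points3D.push_back(cv::Point3f(points4D.at<float>(0, i) / w,
                                   points4D.at<float>(1, i) / w,
                                   points4D.at<float>(2, i) / w));
  }
  return points3D;
}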