
Multithreading a position-tracking function in OpenCV C++

I am reading 4 video files with OpenCV in C++, running red/green color position tracking on each video to get the coordinates of a red and a green LED, and then doing some further, more complex analysis on the extracted coordinates. Right now I process everything sequentially, as shown in the code below. I would like to speed this up with multi-threading and have read about TBB and std::thread, but I am a novice at multi-threading. Can someone suggest how to go about this with either TBB or std::thread? My own rough, untested attempt is at the end of the post.

#include <iostream>
#include <string>
#include <vector>
#include <thread>

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videoio.hpp"

using namespace std;
using namespace cv;

#define ENABLE_LOG 1
#define LOG(msg) std::cout << msg
#define LOGLN(msg) std::cout << msg << std::endl

// Returns {redLedPos, greenLedPos} for one frame; (-1,-1) means the LED was not found.
vector<Point2f> getLedCoordinates(Mat frame)
{
    Point2f redLedPos = Point2f(-1,-1);
    Point2f greenLedPos = Point2f(-1,-1);
    vector<Point2f> ledPos;
    Mat thresholdedImage;

    //threshold each BGR channel to keep only bright pixels
    threshold(frame, thresholdedImage, 160, 255,THRESH_BINARY);

    //remove small noise from the red and green color thresholded image
    Mat str_el = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(2,2));
    morphologyEx(thresholdedImage, thresholdedImage, cv::MORPH_OPEN, str_el);
    morphologyEx(thresholdedImage, thresholdedImage, cv::MORPH_CLOSE, str_el);

    // Convert the thresholded image to HSV
    Mat hsv_image;
    cvtColor(thresholdedImage, hsv_image, cv::COLOR_BGR2HSV);

    // Threshold the HSV image, keep only the red pixels
    Mat lower_red_hue_range, upper_red_hue_range;
    inRange(hsv_image, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lower_red_hue_range);
    inRange(hsv_image, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upper_red_hue_range);

    // Combine the two red hue ranges into a single mask
    Mat red_hue_image;
    addWeighted(lower_red_hue_range, 1.0, upper_red_hue_range, 1.0, 0.0, red_hue_image);
    //blur the image to avoid false positives
    GaussianBlur(red_hue_image, red_hue_image, cv::Size(9, 9), 2, 2);

    // Threshold the HSV image, keep only the green pixels
    Mat green_hue_image;
    inRange(hsv_image, cv::Scalar(50, 50, 120), cv::Scalar(70, 255, 255), green_hue_image);

    //blur the image to avoid false positives
    GaussianBlur(green_hue_image, green_hue_image, cv::Size(9, 9), 2, 2);

    //find center of red contours and green contours with max area
    vector<vector<Point> > redContours, greenContours;
    vector<Vec4i> redHierarchy, greenHierarchy;

    //find contours
    findContours(red_hue_image.clone(), redContours, redHierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    findContours(green_hue_image.clone(), greenContours, greenHierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    //iterate through each contour and find the centroid of the max-area contour for the red LED
    double largest_area = 0;
    int largest_contour_index = 0;
    size_t count = redContours.size();
    if(count > 0){
        for(size_t i = 0; i < count; i++)
        {
            // Find the area of the contour
            double a = contourArea(redContours[i], false);
            if(a > largest_area){
                largest_area = a;
                // Store the index of the largest contour
                largest_contour_index = (int)i;
            }
        }
        Moments redMoment = moments(redContours[largest_contour_index], false);
        redLedPos = Point2f(redMoment.m10/redMoment.m00, redMoment.m01/redMoment.m00);
    }

    //iterate through each contour and find the centroid of the max-area contour for the green LED
    largest_area = 0;
    largest_contour_index = 0;
    count = greenContours.size();
    if(count > 0){
        for(size_t i = 0; i < count; i++)
        {
            // Find the area of the contour
            double a = contourArea(greenContours[i], false);
            if(a > largest_area){
                largest_area = a;
                // Store the index of the largest contour
                largest_contour_index = (int)i;
            }
        }
        Moments greenMoment = moments(greenContours[largest_contour_index], false);
        greenLedPos = Point2f(greenMoment.m10/greenMoment.m00, greenMoment.m01/greenMoment.m00);
    }

    circle(frame, redLedPos, 1, Scalar(0,0,255));
    circle(frame, greenLedPos, 1, Scalar(0,255,0));

    imshow("Frame", frame);
    imshow("Thresholded Frame", thresholdedImage);

    ledPos.push_back(redLedPos);
    ledPos.push_back(greenLedPos);

    return ledPos;
}

int main(int argc, char* argv[])
{
    int width, height;

    //initialize variables to hold average red and green led position
    Point2f averageRedLed, averageGreenLed;
    vector<float> red_x, red_y, green_x, green_y;

    //camera capture devices
    vector<VideoCapture> cameraCaptures;
    String videoFileName;
    int64 t = getTickCount();
    LOGLN("Loading Video Files from arguments passed");
    for (int i = 1; i < argc; ++i)
    {
        videoFileName = String(argv[i]);
        VideoCapture cap(videoFileName); // open the video file
        if(!cap.isOpened()){ // check if we succeeded
            LOGLN("Cannot open the video file");
            return -1;
        }
        //save the loaded capture devices
        cameraCaptures.push_back(cap);
    }
    LOGLN("Loading Video Files- time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOGLN("Started analyzing the position data");
    while(true)
    {
        //Loading each video file frame
        Mat cam2Frame, cam6Frame, cam7Frame, cam8Frame;
        vector<Mat> inputFrames, outputFrames;

        //get new frame from each capture devices
        cameraCaptures.at(0).read(cam2Frame);
        cameraCaptures.at(1).read(cam6Frame);
        cameraCaptures.at(2).read(cam7Frame);
        cameraCaptures.at(3).read(cam8Frame);

        //check if any camera frame is empty, if empty stop processing
        if((cam2Frame.empty()) || (cam6Frame.empty()) || (cam7Frame.empty()) || (cam8Frame.empty())){
            LOGLN("End of File Reached or Camera Frame is empty");
            break;
        }

        //vector to hold led coordinates
        vector<Point2f> cam2LedPos, cam6LedPos, cam7LedPos, cam8LedPos;

        t = getTickCount();
        //get led coordinates for each frame (sequential for now; this is the
        //part I want to parallelize - see the sketch after the code)
        cam2LedPos = getLedCoordinates(cam2Frame);
        cam6LedPos = getLedCoordinates(cam6Frame);
        cam7LedPos = getLedCoordinates(cam7Frame);
        cam8LedPos = getLedCoordinates(cam8Frame);
        LOGLN("getting LED coordinates for all 4 frames - time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");


        if((waitKey(10) & 0xFF) == 27) break;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
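
For reference, this is the direction I was thinking of, though I have not tested it. Replacing the four sequential calls with std::async (C++11) would be my first attempt; it assumes the imshow calls are first moved out of getLedCoordinates, since highgui is only safe from the main thread:

// Rough sketch, untested: process the four frames concurrently with std::async.
// Assumes getLedCoordinates no longer calls imshow/highgui.
#include <future>

// ... inside the while(true) loop, replacing the four sequential calls:
auto f2 = std::async(std::launch::async, getLedCoordinates, cam2Frame);
auto f6 = std::async(std::launch::async, getLedCoordinates, cam6Frame);
auto f7 = std::async(std::launch::async, getLedCoordinates, cam7Frame);
auto f8 = std::async(std::launch::async, getLedCoordinates, cam8Frame);

cam2LedPos = f2.get();   // get() blocks until that frame's result is ready
cam6LedPos = f6.get();
cam7LedPos = f7.get();
cam8LedPos = f8.get();

Alternatively, since OpenCV has its own parallel framework, would cv::parallel_for_ be more appropriate? Something like the following (assuming OpenCV >= 3.2 for the lambda overload, and a build with a parallel backend such as TBB):

// Rough sketch, untested: let OpenCV's parallel framework split the range 0..3.
Mat frames[4] = { cam2Frame, cam6Frame, cam7Frame, cam8Frame };
vector<Point2f> results[4];
parallel_for_(Range(0, 4), [&](const Range& r){
    for (int i = r.start; i < r.end; i++)
        results[i] = getLedCoordinates(frames[i]);
});

Is one of these the right approach, or is there a reason to prefer raw std::thread here?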