Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

LED Blinking Frequency

I decided not to update my old post because it already has so many comments. Here is the program I wrote to detect blinking LEDs. It works only somewhat when the surroundings are a bit dark, and it doesn't work at all when it's bright. I have been given some suggestions to improve the efficiency, like pre-allocating, but I think I need to work on the logic as well. Kindly guide me: how can I detect the position of a blinking LED? The camera frame rate is 90 fps, the blinking frequency is 45 Hz, and there is more than one LED in the frame. Attached are two frames in a bright-light condition. Here is the logic: 1. Set up the camera parameters to reach 90 fps. 2. Quickly capture 30 frames and compute the difference of consecutive frames and the threshold of that difference. 3. Find contour centers in the thresholded image. 4. Organize the contour centers in an R*-tree and count how often centers recur in a user-defined neighborhood. 5. If the count falls within the frequency-and-tolerance range, predict the point to be an LED light.

Kindly guide me to modify this code so that it works in bright-light conditions and the success rate of detecting the LEDs is high. C:\fakepath\frame11.jpg (/upfiles/14359976621521109.jpg)

    #include <opencv2\highgui\highgui.hpp>
      #include <opencv2\core\core.hpp>
      #include <opencvlibpath.h>
     #include <FlyCapture2.h>
     #include <opencv\highgui.h>
     #include <opencv\cv.h>
     #include <vector>
     #include <boost/geometry.hpp>
    #include <boost/geometry/geometries/point.hpp>
     #include <boost/geometry/index/rtree.hpp>
   using namespace cv;
    using namespace FlyCapture2;
    using namespace std;
      namespace bg = boost::geometry;
      namespace bgi = boost::geometry::index;
     using  point = bg::model::point < int, 2, bg::cs::cartesian >;
      using  pointI = std::pair<point, std::size_t>;

    // ---- Small helpers for the R*-tree neighbourhood queries ----------------

    // Query predicate: accepts the base point itself (distance 0) and every
    // other point lying strictly closer than the configured maximum distance.
    struct distance_pred
    {
        pointI const& _base;       // centre of the neighbourhood query
        double        _threshold;  // exclusive upper bound on the distance

        distance_pred(pointI const& base, int max_dist)
            : _base(base)
            , _threshold(max_dist)
        {
        }

        bool operator()(pointI const& p) const
        {
            const double dist = boost::geometry::distance(_base.first, p.first);
            if (dist == 0)
                return true;           // a point always belongs to its own hood
            return dist < _threshold;  // strict radius test for everything else
        }
    };

    // Render an indexed point as "{ x, y }" (used for debug output only).
    std::ostream& operator<<(std::ostream &os, pointI const& p)
    {
        return os << "{ " << p.first.get<0>() << ", " << p.first.get<1>() << " }";
    }

    // Pack a coordinate pair plus its index in the cloud into a pointI.
    pointI mp(int x, int y, std::size_t index)
    {
        return pointI(point(x, y), index);
    }


       // Tuning parameters for the capture / blink-detection pipeline.
       const static int NUMBER_OF_FRAME_CAPTURE = 30; // frames grabbed per detection burst
       const static int SENSITIVITY_VALUE = 50; // binary threshold applied to the frame difference
      const static int BLUR_SIZE = 6; // box-blur kernel side used to merge nearby specks
      const static int FREQUENCY = 30; // expected neighbourhood hit count for a blinking LED
      const static int TOLERANCE = 5; // accepted deviation around FREQUENCY
      const static int NEIGHBOURHOOD = 10; // pixel radius used for the R*-tree clustering
      const static int NUMBEROFLEDS = 1; // expected LED count (only used by commented-out kmeans)


       void getThresholdImage(vector<Mat>  &framesToProcess, vector<Mat> &thresholdImages, vector<Mat> &differenceImages)
 {
   vector<Mat> grayImage;

for (int i = 0; i < framesToProcess.size(); i++)
{
    Mat tempMatImage, tempGrayImage;

    resize(framesToProcess[i], tempMatImage, Size(600, 800));
    cvtColor(tempMatImage, tempGrayImage, COLOR_BGR2GRAY);
    grayImage.push_back(tempGrayImage);

    if (i > 0)
    {
        Mat tempDifferenceImage, tempThresholdImage;
        absdiff(grayImage[i - 1], grayImage[i], tempDifferenceImage);
        imshow("difference Image", tempDifferenceImage);
        //erode(tempDifferenceImage, tempDifferenceImage, Mat(), Point(-1, -1), 2, BORDER_CONSTANT);
        differenceImages.push_back(tempDifferenceImage);
        threshold(tempDifferenceImage, tempThresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
        imshow("before blur", tempThresholdImage);
        blur(tempThresholdImage, tempThresholdImage, Size(BLUR_SIZE, BLUR_SIZE));
        imshow("After BlurThreshold Image", tempThresholdImage);
        thresholdImages.push_back(tempThresholdImage);
    }
}
}

     // Locate every external contour in each threshold image and append its
     // bounding-box centre (as a boost point) to contourCenter.
     void getContourCenters(vector<Mat>& thresholdImage, vector<point>& contourCenter)
   {
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;

    for (size_t j = 0; j < thresholdImage.size(); ++j)
    {
        // BUG FIX: `Mat temp = thresholdImage[j]` only copies the Mat header
        // (shared pixel data), and the pre-3.2 OpenCV findContours modifies
        // its input image -- so the caller's threshold images were being
        // clobbered.  Work on a deep copy instead.
        Mat temp = thresholdImage[j].clone();
        findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

        cout << contours.size();

        for (size_t k = 0; k < contours.size(); ++k)
        {
            // Use the bounding-box centre as the contour's position.
            const Rect box = boundingRect(contours[k]);
            contourCenter.push_back(point(box.x + box.width / 2,
                                          box.y + box.height / 2));
        }
    }
}

   // Cluster contour centres with an R*-tree: a pixel location whose
   // neighbourhood collects roughly FREQUENCY hits across the difference
   // images is predicted to be a blinking LED.
   // BUG FIX: the original signature `vector<point>& const` is ill-formed
   // C++ (a reference itself cannot be const-qualified); the intended type
   // is a reference to a const vector.
   void getLEDPoints(vector<Point>& LEDPoints, const vector<point>& contourCenters)
   {
    bgi::rtree< pointI, bgi::quadratic<16> > rtree;
    vector< pointI > cloud;
    cloud.reserve(contourCenters.size());

    for (size_t i = 0; i < contourCenters.size(); ++i)
    {
        int x = contourCenters[i].get<0>();
        int y = contourCenters[i].get<1>();
        cloud.push_back(mp(x, y, i));
        rtree.insert(make_pair(contourCenters[i], i));
    }

    std::vector<pointI> hood;
    std::vector<std::size_t> rec;   // indices already claimed by a neighbourhood
    for (std::size_t i(0); i < cloud.size(); ++i) if (rec.end() == std::find(rec.begin(), rec.end(), i))
    {
        hood.clear();
        auto &p = cloud[i];
        std::cout << "neighborhood of point " << p << "\n-----------------\n\n";

        // All stored points within NEIGHBOURHOOD pixels of p (p included).
        rtree.query(bgi::satisfies(distance_pred(p, NEIGHBOURHOOD)), std::back_inserter(hood));

        // Exclude this neighbourhood from the following queries.
        std::for_each(hood.begin(), hood.end(), [&rec](pointI const& pt) {
            rec.push_back(pt.second);
        });

        if (!hood.empty())
        {
            for (auto &&pt : hood) std::cout << '\t' << pt << std::endl;
            int cnt = hood.size();
            cout << "number of neighbours: " << cnt;
            // A genuine 45 Hz LED should register in FREQUENCY +/- TOLERANCE
            // of the difference frames.
            if (((FREQUENCY - TOLERANCE) <= cnt) && ((FREQUENCY + TOLERANCE) >= cnt))
            {
                LEDPoints.push_back(Point(p.first.get<0>(), p.first.get<1>()));
            }
        }
    }
}

// Dump the FlyCapture2 error trace for diagnostics.
void PrintError(Error error)
{
    error.PrintErrorTrace();
}


 // Configure the camera: Format7 MODE_0, RAW8, a 1792x1792 window at offset
 // (128,128), and a fixed packet size.  On any failure the function prints a
 // message, waits for a key press (so the console stays visible) and exits.
 void setupCamera(Camera &camera)
 {
    Error error;
    const Mode k_fmt7Mode = MODE_0;
    const PixelFormat k_fmt7PixFmt = PIXEL_FORMAT_RAW8;
    // Packet size drives the achievable frame rate -- presumably tuned for
    // ~90 fps; TODO confirm against the camera's technical reference.
    const unsigned int k_bytesPerPacket = 36764;

    Format7Info fmt7Info;
    bool supported;
    fmt7Info.mode = k_fmt7Mode;
    error = camera.GetFormat7Info(&fmt7Info, &supported);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }

    // BUG FIX: the 'supported' flag was retrieved but never checked.
    if (!supported)
    {
        cout << "Format7 mode is not supported" << endl;
        getchar();
        exit(1);
    }

    if ((k_fmt7PixFmt & fmt7Info.pixelFormatBitField) == 0)
    {
        cout << "Pixel format is not supported" << endl;
        getchar();
        exit(1);
    }

    Format7ImageSettings fmt7ImageSettings;
    fmt7ImageSettings.mode = k_fmt7Mode;
    fmt7ImageSettings.offsetX = 128;
    fmt7ImageSettings.offsetY = 128;
    fmt7ImageSettings.width = 1792;
    fmt7ImageSettings.height = 1792;
    fmt7ImageSettings.pixelFormat = k_fmt7PixFmt;

    bool valid;
    Format7PacketInfo fmt7PacketInfo;

    // Ask the driver whether the requested geometry/format is legal.
    error = camera.ValidateFormat7Settings(
        &fmt7ImageSettings,
        &valid,
        &fmt7PacketInfo);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }

    if (!valid)
    {
        cout << "Format7 settings are not valid" << endl;
        getchar();
        exit(1);
    }

    // Commit the settings to the camera.
    error = camera.SetFormat7Configuration(
        &fmt7ImageSettings,
        k_bytesPerPacket);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }
 }

 int main()
{
Mat cameraFeed;

vector<Image> rawImages;
vector<Mat> framesToProcess;
vector<Mat> thresholdImages;
vector<Mat> differenceImages;
vector<point> contourCenters;
vector<Point> LEDPoints;

Camera camera;
Error error;

error = camera.Connect(0);
if (error != PGRERROR_OK)
{
    cout << "Failed to connect to camera" << endl;
    getchar();
    exit(1);
}

setupCamera(camera);


error = camera.StartCapture();
if (error == PGRERROR_ISOCH_BANDWIDTH_EXCEEDED)
{
    cout << "Bandwidth exceeded" << endl;
    getchar();
    exit(1);
}
else if (error != PGRERROR_OK)
{
    cout << "Failed to start image capture" << endl;
    getchar();
    exit(1);
}

/*Property frmRate;
frmRate.type = FRAME_RATE;
error = camera.GetProperty(&frmRate);
if (error != PGRERROR_OK)
{
    PrintError(error);
    getchar();
    exit(1);
}

cout << "Frame rate is " << fixed << frmRate.absValue << " fps" << endl; */

//int x = 0;

while (1)
{

    framesToProcess.clear();
    thresholdImages.clear();
    rawImages.clear();
    LEDPoints.clear();
    contourCenters.clear();


    for (int i = 0; i < NUMBER_OF_FRAME_CAPTURE; i++)
    {
        Mat currentFrame;
        Image rawImage;
        Error error = camera.RetrieveBuffer(&rawImage);
        if (error != PGRERROR_OK)
        {
            cout << "capture error" << endl;
        }

        // convert to rgb
        Image rgbImage;
        Mat tempImage;
        rawImage.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage);

        // convert to OpenCV Mat
        unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize() / (double)rgbImage.GetRows();
        currentFrame = Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(), rowBytes);
        tempImage = currentFrame.clone();
        framesToProcess.push_back(tempImage);
    }


    getThresholdImage(framesToProcess, thresholdImages, differenceImages);


    cout << "Threshold image size" << thresholdImages.size();
    cout << "frames to process image size" << framesToProcess.size();


    getContourCenters(thresholdImages, contourCenters);


    getLEDPoints(LEDPoints, contourCenters); 

    cout << endl << "LED Points size is" << LEDPoints.size() << endl;

    Image rawImage;
    Error error = camera.RetrieveBuffer(&rawImage);
    if (error != PGRERROR_OK)
    {
        cout << "capture error" << endl;
    }

    // convert to rgb
    Image rgbImage;
    rawImage.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage);

    // convert to OpenCV Mat
    unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize() / (double)rgbImage.GetRows();
    cameraFeed = Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(), rowBytes);
    resize(cameraFeed, cameraFeed, Size(800, 600));

    if (LEDPoints.size() > 0)
    {

        /*if (LEDPoints.size > NUMBEROFLEDS)
        {
            vector<int> LEDCenters(NUMBEROFLEDS);

            kmeans(LEDPoints, NUMBEROFLEDS, labels,
                TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0),
                3, KMEANS_PP_CENTERS, LEDCenters);
        }*/

        for (int i = 0; i < LEDPoints.size(); ++i)
        {
            circle(cameraFeed, LEDPoints[i], 20, Scalar(0, 255, 0), 2);
        }
    }
    //++x;
    imshow("Camera Feed", cameraFeed);

    waitKey(1);
}

camera.Disconnect();

return(0);

}

LED Blinking Frequency

C:\fakepath\frame12.jpg — I decided not to update my old post because it already has so many comments. Here is the program I wrote to detect blinking LEDs. It works only somewhat when the surroundings are a bit dark, and it doesn't work at all when it's bright. I have been given some suggestions to improve the efficiency, like pre-allocating, but I think I need to work on the logic as well. Kindly guide me: how can I detect the position of a blinking LED? The camera frame rate is 90 fps, the blinking frequency is 45 Hz, and there is more than one LED in the frame. Attached are two frames in a bright-light condition. Here is the logic: 1. Set up the camera parameters to reach 90 fps. 2. Quickly capture 30 frames and compute the difference of consecutive frames and the threshold of that difference. 3. Find contour centers in the thresholded image. 4. Organize the contour centers in an R*-tree and count how often centers recur in a user-defined neighborhood. 5. If the count falls within the frequency-and-tolerance range, predict the point to be an LED light.

Kindly guide me to modify this code so that it works in bright-light conditions and the success rate of detecting the LEDs is high. C:\fakepath\frame11.jpg (/upfiles/14359976621521109.jpg)

    #include <opencv2\highgui\highgui.hpp>
      #include <opencv2\core\core.hpp>
      #include <opencvlibpath.h>
     #include <FlyCapture2.h>
     #include <opencv\highgui.h>
     #include <opencv\cv.h>
     #include <vector>
     #include <boost/geometry.hpp>
    #include <boost/geometry/geometries/point.hpp>
     #include <boost/geometry/index/rtree.hpp>
   using namespace cv;
    using namespace FlyCapture2;
    using namespace std;
      namespace bg = boost::geometry;
      namespace bgi = boost::geometry::index;
     using  point = bg::model::point < int, 2, bg::cs::cartesian >;
      using  pointI = std::pair<point, std::size_t>;

    // ---- Small helpers for the R*-tree neighbourhood queries ----------------

    // Query predicate: accepts the base point itself (distance 0) and every
    // other point lying strictly closer than the configured maximum distance.
    struct distance_pred
    {
        pointI const& _base;       // centre of the neighbourhood query
        double        _threshold;  // exclusive upper bound on the distance

        distance_pred(pointI const& base, int max_dist)
            : _base(base)
            , _threshold(max_dist)
        {
        }

        bool operator()(pointI const& p) const
        {
            const double dist = boost::geometry::distance(_base.first, p.first);
            if (dist == 0)
                return true;           // a point always belongs to its own hood
            return dist < _threshold;  // strict radius test for everything else
        }
    };

    // Render an indexed point as "{ x, y }" (used for debug output only).
    std::ostream& operator<<(std::ostream &os, pointI const& p)
    {
        return os << "{ " << p.first.get<0>() << ", " << p.first.get<1>() << " }";
    }

    // Pack a coordinate pair plus its index in the cloud into a pointI.
    pointI mp(int x, int y, std::size_t index)
    {
        return pointI(point(x, y), index);
    }


       // Tuning parameters for the capture / blink-detection pipeline.
       const static int NUMBER_OF_FRAME_CAPTURE = 30; // frames grabbed per detection burst
       const static int SENSITIVITY_VALUE = 50; // binary threshold applied to the frame difference
      const static int BLUR_SIZE = 6; // box-blur kernel side used to merge nearby specks
      const static int FREQUENCY = 30; // expected neighbourhood hit count for a blinking LED
      const static int TOLERANCE = 5; // accepted deviation around FREQUENCY
      const static int NEIGHBOURHOOD = 10; // pixel radius used for the R*-tree clustering
      const static int NUMBEROFLEDS = 1; // expected LED count (only used by commented-out kmeans)


       void getThresholdImage(vector<Mat>  &framesToProcess, vector<Mat> &thresholdImages, vector<Mat> &differenceImages)
 {
   vector<Mat> grayImage;

for (int i = 0; i < framesToProcess.size(); i++)
{
    Mat tempMatImage, tempGrayImage;

    resize(framesToProcess[i], tempMatImage, Size(600, 800));
    cvtColor(tempMatImage, tempGrayImage, COLOR_BGR2GRAY);
    grayImage.push_back(tempGrayImage);

    if (i > 0)
    {
        Mat tempDifferenceImage, tempThresholdImage;
        absdiff(grayImage[i - 1], grayImage[i], tempDifferenceImage);
        imshow("difference Image", tempDifferenceImage);
        //erode(tempDifferenceImage, tempDifferenceImage, Mat(), Point(-1, -1), 2, BORDER_CONSTANT);
        differenceImages.push_back(tempDifferenceImage);
        threshold(tempDifferenceImage, tempThresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
        imshow("before blur", tempThresholdImage);
        blur(tempThresholdImage, tempThresholdImage, Size(BLUR_SIZE, BLUR_SIZE));
        imshow("After BlurThreshold Image", tempThresholdImage);
        thresholdImages.push_back(tempThresholdImage);
    }
}
}

     // Locate every external contour in each threshold image and append its
     // bounding-box centre (as a boost point) to contourCenter.
     void getContourCenters(vector<Mat>& thresholdImage, vector<point>& contourCenter)
   {
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;

    for (size_t j = 0; j < thresholdImage.size(); ++j)
    {
        // BUG FIX: `Mat temp = thresholdImage[j]` only copies the Mat header
        // (shared pixel data), and the pre-3.2 OpenCV findContours modifies
        // its input image -- so the caller's threshold images were being
        // clobbered.  Work on a deep copy instead.
        Mat temp = thresholdImage[j].clone();
        findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

        cout << contours.size();

        for (size_t k = 0; k < contours.size(); ++k)
        {
            // Use the bounding-box centre as the contour's position.
            const Rect box = boundingRect(contours[k]);
            contourCenter.push_back(point(box.x + box.width / 2,
                                          box.y + box.height / 2));
        }
    }
}

   // Cluster contour centres with an R*-tree: a pixel location whose
   // neighbourhood collects roughly FREQUENCY hits across the difference
   // images is predicted to be a blinking LED.
   // BUG FIX: the original signature `vector<point>& const` is ill-formed
   // C++ (a reference itself cannot be const-qualified); the intended type
   // is a reference to a const vector.
   void getLEDPoints(vector<Point>& LEDPoints, const vector<point>& contourCenters)
   {
    bgi::rtree< pointI, bgi::quadratic<16> > rtree;
    vector< pointI > cloud;
    cloud.reserve(contourCenters.size());

    for (size_t i = 0; i < contourCenters.size(); ++i)
    {
        int x = contourCenters[i].get<0>();
        int y = contourCenters[i].get<1>();
        cloud.push_back(mp(x, y, i));
        rtree.insert(make_pair(contourCenters[i], i));
    }

    std::vector<pointI> hood;
    std::vector<std::size_t> rec;   // indices already claimed by a neighbourhood
    for (std::size_t i(0); i < cloud.size(); ++i) if (rec.end() == std::find(rec.begin(), rec.end(), i))
    {
        hood.clear();
        auto &p = cloud[i];
        std::cout << "neighborhood of point " << p << "\n-----------------\n\n";

        // All stored points within NEIGHBOURHOOD pixels of p (p included).
        rtree.query(bgi::satisfies(distance_pred(p, NEIGHBOURHOOD)), std::back_inserter(hood));

        // Exclude this neighbourhood from the following queries.
        std::for_each(hood.begin(), hood.end(), [&rec](pointI const& pt) {
            rec.push_back(pt.second);
        });

        if (!hood.empty())
        {
            for (auto &&pt : hood) std::cout << '\t' << pt << std::endl;
            int cnt = hood.size();
            cout << "number of neighbours: " << cnt;
            // A genuine 45 Hz LED should register in FREQUENCY +/- TOLERANCE
            // of the difference frames.
            if (((FREQUENCY - TOLERANCE) <= cnt) && ((FREQUENCY + TOLERANCE) >= cnt))
            {
                LEDPoints.push_back(Point(p.first.get<0>(), p.first.get<1>()));
            }
        }
    }
}

// Dump the FlyCapture2 error trace for diagnostics.
void PrintError(Error error)
{
    error.PrintErrorTrace();
}


 // Configure the camera: Format7 MODE_0, RAW8, a 1792x1792 window at offset
 // (128,128), and a fixed packet size.  On any failure the function prints a
 // message, waits for a key press (so the console stays visible) and exits.
 void setupCamera(Camera &camera)
 {
    Error error;
    const Mode k_fmt7Mode = MODE_0;
    const PixelFormat k_fmt7PixFmt = PIXEL_FORMAT_RAW8;
    // Packet size drives the achievable frame rate -- presumably tuned for
    // ~90 fps; TODO confirm against the camera's technical reference.
    const unsigned int k_bytesPerPacket = 36764;

    Format7Info fmt7Info;
    bool supported;
    fmt7Info.mode = k_fmt7Mode;
    error = camera.GetFormat7Info(&fmt7Info, &supported);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }

    // BUG FIX: the 'supported' flag was retrieved but never checked.
    if (!supported)
    {
        cout << "Format7 mode is not supported" << endl;
        getchar();
        exit(1);
    }

    if ((k_fmt7PixFmt & fmt7Info.pixelFormatBitField) == 0)
    {
        cout << "Pixel format is not supported" << endl;
        getchar();
        exit(1);
    }

    Format7ImageSettings fmt7ImageSettings;
    fmt7ImageSettings.mode = k_fmt7Mode;
    fmt7ImageSettings.offsetX = 128;
    fmt7ImageSettings.offsetY = 128;
    fmt7ImageSettings.width = 1792;
    fmt7ImageSettings.height = 1792;
    fmt7ImageSettings.pixelFormat = k_fmt7PixFmt;

    bool valid;
    Format7PacketInfo fmt7PacketInfo;

    // Ask the driver whether the requested geometry/format is legal.
    error = camera.ValidateFormat7Settings(
        &fmt7ImageSettings,
        &valid,
        &fmt7PacketInfo);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }

    if (!valid)
    {
        cout << "Format7 settings are not valid" << endl;
        getchar();
        exit(1);
    }

    // Commit the settings to the camera.
    error = camera.SetFormat7Configuration(
        &fmt7ImageSettings,
        k_bytesPerPacket);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }
 }

 int main()
{
Mat cameraFeed;

vector<Image> rawImages;
vector<Mat> framesToProcess;
vector<Mat> thresholdImages;
vector<Mat> differenceImages;
vector<point> contourCenters;
vector<Point> LEDPoints;

Camera camera;
Error error;

error = camera.Connect(0);
if (error != PGRERROR_OK)
{
    cout << "Failed to connect to camera" << endl;
    getchar();
    exit(1);
}

setupCamera(camera);


error = camera.StartCapture();
if (error == PGRERROR_ISOCH_BANDWIDTH_EXCEEDED)
{
    cout << "Bandwidth exceeded" << endl;
    getchar();
    exit(1);
}
else if (error != PGRERROR_OK)
{
    cout << "Failed to start image capture" << endl;
    getchar();
    exit(1);
}

/*Property frmRate;
frmRate.type = FRAME_RATE;
error = camera.GetProperty(&frmRate);
if (error != PGRERROR_OK)
{
    PrintError(error);
    getchar();
    exit(1);
}

cout << "Frame rate is " << fixed << frmRate.absValue << " fps" << endl; */

//int x = 0;

while (1)
{

    framesToProcess.clear();
    thresholdImages.clear();
    rawImages.clear();
    LEDPoints.clear();
    contourCenters.clear();


    for (int i = 0; i < NUMBER_OF_FRAME_CAPTURE; i++)
    {
        Mat currentFrame;
        Image rawImage;
        Error error = camera.RetrieveBuffer(&rawImage);
        if (error != PGRERROR_OK)
        {
            cout << "capture error" << endl;
        }

        // convert to rgb
        Image rgbImage;
        Mat tempImage;
        rawImage.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage);

        // convert to OpenCV Mat
        unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize() / (double)rgbImage.GetRows();
        currentFrame = Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(), rowBytes);
        tempImage = currentFrame.clone();
        framesToProcess.push_back(tempImage);
    }


    getThresholdImage(framesToProcess, thresholdImages, differenceImages);


    cout << "Threshold image size" << thresholdImages.size();
    cout << "frames to process image size" << framesToProcess.size();


    getContourCenters(thresholdImages, contourCenters);


    getLEDPoints(LEDPoints, contourCenters); 

    cout << endl << "LED Points size is" << LEDPoints.size() << endl;

    Image rawImage;
    Error error = camera.RetrieveBuffer(&rawImage);
    if (error != PGRERROR_OK)
    {
        cout << "capture error" << endl;
    }

    // convert to rgb
    Image rgbImage;
    rawImage.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage);

    // convert to OpenCV Mat
    unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize() / (double)rgbImage.GetRows();
    cameraFeed = Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(), rowBytes);
    resize(cameraFeed, cameraFeed, Size(800, 600));

    if (LEDPoints.size() > 0)
    {

        /*if (LEDPoints.size > NUMBEROFLEDS)
        {
            vector<int> LEDCenters(NUMBEROFLEDS);

            kmeans(LEDPoints, NUMBEROFLEDS, labels,
                TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0),
                3, KMEANS_PP_CENTERS, LEDCenters);
        }*/

        for (int i = 0; i < LEDPoints.size(); ++i)
        {
            circle(cameraFeed, LEDPoints[i], 20, Scalar(0, 255, 0), 2);
        }
    }
    //++x;
    imshow("Camera Feed", cameraFeed);

    waitKey(1);
}

camera.Disconnect();

return(0);

}

LED Blinking Frequency

C:\fakepath\frame12.jpg — I decided not to update my old post because it already has so many comments. Here is the program I wrote to detect blinking LEDs. It works only somewhat when the surroundings are a bit dark, and it doesn't work at all when it's bright. I have been given some suggestions to improve the efficiency, like pre-allocating, but I think I need to work on the logic as well. Kindly guide me: how can I detect the position of a blinking LED? The camera frame rate is 90 fps, the blinking frequency is 45 Hz, and there is more than one LED in the frame. Attached are two frames in a bright-light condition. Here is the logic: 1. Set up the camera parameters to reach 90 fps. 2. Quickly capture 30 frames and compute the difference of consecutive frames and the threshold of that difference. 3. Find contour centers in the thresholded image. 4. Organize the contour centers in an R*-tree and count how often centers recur in a user-defined neighborhood. 5. If the count falls within the frequency-and-tolerance range, predict the point to be an LED light.

Kindly guide me to modify this code so that it works in bright-light conditions and the success rate of detecting the LEDs is high. C:\fakepath\frame11.jpg (/upfiles/14359976621521109.jpg)

As suggested, the question seems to be too long. I am trying to compute the difference between two frames, threshold that difference, find contours, and then check the frequency of the contour centers to detect the light. The following function accepts N images and proceeds as explained. I need this to work in all lighting scenarios, but it is currently working only in a low-light environment. Kindly guide me on how I can modify the code to make it work in any scenario.

    #include <opencv2\highgui\highgui.hpp>
      #include <opencv2\core\core.hpp>
      #include <opencvlibpath.h>
     #include <FlyCapture2.h>
     #include <opencv\highgui.h>
     #include <opencv\cv.h>
     #include <vector>
     #include <boost/geometry.hpp>
    #include <boost/geometry/geometries/point.hpp>
     #include <boost/geometry/index/rtree.hpp>
   using namespace cv;
    using namespace FlyCapture2;
    using namespace std;
      namespace bg = boost::geometry;
      namespace bgi = boost::geometry::index;
     using  point = bg::model::point < int, 2, bg::cs::cartesian >;
      using  pointI = std::pair<point, std::size_t>;

    // ---- Small helpers for the R*-tree neighbourhood queries ----------------

    // Query predicate: accepts the base point itself (distance 0) and every
    // other point lying strictly closer than the configured maximum distance.
    struct distance_pred
    {
        pointI const& _base;       // centre of the neighbourhood query
        double        _threshold;  // exclusive upper bound on the distance

        distance_pred(pointI const& base, int max_dist)
            : _base(base)
            , _threshold(max_dist)
        {
        }

        bool operator()(pointI const& p) const
        {
            const double dist = boost::geometry::distance(_base.first, p.first);
            if (dist == 0)
                return true;           // a point always belongs to its own hood
            return dist < _threshold;  // strict radius test for everything else
        }
    };

    // Render an indexed point as "{ x, y }" (used for debug output only).
    std::ostream& operator<<(std::ostream &os, pointI const& p)
    {
        return os << "{ " << p.first.get<0>() << ", " << p.first.get<1>() << " }";
    }

    // Pack a coordinate pair plus its index in the cloud into a pointI.
    pointI mp(int x, int y, std::size_t index)
    {
        return pointI(point(x, y), index);
    }


       // Tuning parameters for the capture / blink-detection pipeline.
       const static int NUMBER_OF_FRAME_CAPTURE = 30; // frames grabbed per detection burst
       const static int SENSITIVITY_VALUE = 50; // binary threshold applied to the frame difference
      const static int BLUR_SIZE = 6; // box-blur kernel side used to merge nearby specks
      const static int FREQUENCY = 30; // expected neighbourhood hit count for a blinking LED
      const static int TOLERANCE = 5; // accepted deviation around FREQUENCY
      const static int NEIGHBOURHOOD = 10; // pixel radius used for the R*-tree clustering
      const static int NUMBEROFLEDS = 1; // expected LED count (only used by commented-out kmeans)


       void getThresholdImage(vector<Mat>  &framesToProcess, vector<Mat> &thresholdImages, vector<Mat> &differenceImages)
 {
   vector<Mat> grayImage;

for (int i = 0; i < framesToProcess.size(); i++)
{
    Mat tempMatImage, tempGrayImage;

    resize(framesToProcess[i], tempMatImage, Size(600, 800));
    cvtColor(tempMatImage, tempGrayImage, COLOR_BGR2GRAY);
    grayImage.push_back(tempGrayImage);

    if (i > 0)
    {
        Mat tempDifferenceImage, tempThresholdImage;
        absdiff(grayImage[i - 1], grayImage[i], tempDifferenceImage);
        imshow("difference Image", tempDifferenceImage);
        //erode(tempDifferenceImage, tempDifferenceImage, Mat(), Point(-1, -1), 2, BORDER_CONSTANT);
        differenceImages.push_back(tempDifferenceImage);
        threshold(tempDifferenceImage, tempThresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
        imshow("before blur", tempThresholdImage);
        blur(tempThresholdImage, tempThresholdImage, Size(BLUR_SIZE, BLUR_SIZE));
        imshow("After BlurThreshold Image", tempThresholdImage);
        thresholdImages.push_back(tempThresholdImage);
    }
}
}

     // Locate every external contour in each threshold image and append its
     // bounding-box centre (as a boost point) to contourCenter.
     void getContourCenters(vector<Mat>& thresholdImage, vector<point>& contourCenter)
   {
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;

    for (size_t j = 0; j < thresholdImage.size(); ++j)
    {
        // BUG FIX: `Mat temp = thresholdImage[j]` only copies the Mat header
        // (shared pixel data), and the pre-3.2 OpenCV findContours modifies
        // its input image -- so the caller's threshold images were being
        // clobbered.  Work on a deep copy instead.
        Mat temp = thresholdImage[j].clone();
        findContours(temp, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

        cout << contours.size();

        for (size_t k = 0; k < contours.size(); ++k)
        {
            // Use the bounding-box centre as the contour's position.
            const Rect box = boundingRect(contours[k]);
            contourCenter.push_back(point(box.x + box.width / 2,
                                          box.y + box.height / 2));
        }
    }
}

   // Cluster contour centres with an R*-tree: a pixel location whose
   // neighbourhood collects roughly FREQUENCY hits across the difference
   // images is predicted to be a blinking LED.
   // BUG FIX: the original signature `vector<point>& const` is ill-formed
   // C++ (a reference itself cannot be const-qualified); the intended type
   // is a reference to a const vector.
   void getLEDPoints(vector<Point>& LEDPoints, const vector<point>& contourCenters)
   {
    bgi::rtree< pointI, bgi::quadratic<16> > rtree;
    vector< pointI > cloud;
    cloud.reserve(contourCenters.size());

    for (size_t i = 0; i < contourCenters.size(); ++i)
    {
        int x = contourCenters[i].get<0>();
        int y = contourCenters[i].get<1>();
        cloud.push_back(mp(x, y, i));
        rtree.insert(make_pair(contourCenters[i], i));
    }

    std::vector<pointI> hood;
    std::vector<std::size_t> rec;   // indices already claimed by a neighbourhood
    for (std::size_t i(0); i < cloud.size(); ++i) if (rec.end() == std::find(rec.begin(), rec.end(), i))
    {
        hood.clear();
        auto &p = cloud[i];
        std::cout << "neighborhood of point " << p << "\n-----------------\n\n";

        // All stored points within NEIGHBOURHOOD pixels of p (p included).
        rtree.query(bgi::satisfies(distance_pred(p, NEIGHBOURHOOD)), std::back_inserter(hood));

        // Exclude this neighbourhood from the following queries.
        std::for_each(hood.begin(), hood.end(), [&rec](pointI const& pt) {
            rec.push_back(pt.second);
        });

        if (!hood.empty())
        {
            for (auto &&pt : hood) std::cout << '\t' << pt << std::endl;
            int cnt = hood.size();
            cout << "number of neighbours: " << cnt;
            // A genuine 45 Hz LED should register in FREQUENCY +/- TOLERANCE
            // of the difference frames.
            if (((FREQUENCY - TOLERANCE) <= cnt) && ((FREQUENCY + TOLERANCE) >= cnt))
            {
                LEDPoints.push_back(Point(p.first.get<0>(), p.first.get<1>()));
            }
        }
    }
}

// Dump the FlyCapture2 error trace for diagnostics.
void PrintError(Error error)
{
    error.PrintErrorTrace();
}


 // Configure the camera: Format7 MODE_0, RAW8, a 1792x1792 window at offset
 // (128,128), and a fixed packet size.  On any failure the function prints a
 // message, waits for a key press (so the console stays visible) and exits.
 void setupCamera(Camera &camera)
 {
    Error error;
    const Mode k_fmt7Mode = MODE_0;
    const PixelFormat k_fmt7PixFmt = PIXEL_FORMAT_RAW8;
    // Packet size drives the achievable frame rate -- presumably tuned for
    // ~90 fps; TODO confirm against the camera's technical reference.
    const unsigned int k_bytesPerPacket = 36764;

    Format7Info fmt7Info;
    bool supported;
    fmt7Info.mode = k_fmt7Mode;
    error = camera.GetFormat7Info(&fmt7Info, &supported);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }

    // BUG FIX: the 'supported' flag was retrieved but never checked.
    if (!supported)
    {
        cout << "Format7 mode is not supported" << endl;
        getchar();
        exit(1);
    }

    if ((k_fmt7PixFmt & fmt7Info.pixelFormatBitField) == 0)
    {
        cout << "Pixel format is not supported" << endl;
        getchar();
        exit(1);
    }

    Format7ImageSettings fmt7ImageSettings;
    fmt7ImageSettings.mode = k_fmt7Mode;
    fmt7ImageSettings.offsetX = 128;
    fmt7ImageSettings.offsetY = 128;
    fmt7ImageSettings.width = 1792;
    fmt7ImageSettings.height = 1792;
    fmt7ImageSettings.pixelFormat = k_fmt7PixFmt;

    bool valid;
    Format7PacketInfo fmt7PacketInfo;

    // Ask the driver whether the requested geometry/format is legal.
    error = camera.ValidateFormat7Settings(
        &fmt7ImageSettings,
        &valid,
        &fmt7PacketInfo);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }

    if (!valid)
    {
        cout << "Format7 settings are not valid" << endl;
        getchar();
        exit(1);
    }

    // Commit the settings to the camera.
    error = camera.SetFormat7Configuration(
        &fmt7ImageSettings,
        k_bytesPerPacket);
    if (error != PGRERROR_OK)
    {
        PrintError(error);
        getchar();
        exit(1);
    }
 }

 int main()
{
Mat cameraFeed;

vector<Image> rawImages;
vector<Mat> framesToProcess;
vector<Mat> thresholdImages;
vector<Mat> differenceImages;
vector<point> contourCenters;
vector<Point> LEDPoints;

Camera camera;
Error error;

error = camera.Connect(0);
if (error != PGRERROR_OK)
{
    cout << "Failed to connect to camera" << endl;
    getchar();
    exit(1);
}

setupCamera(camera);


error = camera.StartCapture();
if (error == PGRERROR_ISOCH_BANDWIDTH_EXCEEDED)
{
    cout << "Bandwidth exceeded" << endl;
    getchar();
    exit(1);
}
else if (error != PGRERROR_OK)
{
    cout << "Failed to start image capture" << endl;
    getchar();
    exit(1);
}

/*Property frmRate;
frmRate.type = FRAME_RATE;
error = camera.GetProperty(&frmRate);
if (error != PGRERROR_OK)
{
    PrintError(error);
    getchar();
    exit(1);
}

cout << "Frame rate is " << fixed << frmRate.absValue << " fps" << endl; */

//int x = 0;

while (1)
{

    framesToProcess.clear();
    thresholdImages.clear();
    rawImages.clear();
    LEDPoints.clear();
    contourCenters.clear();


    for (int i = 0; i < NUMBER_OF_FRAME_CAPTURE; i++)
    {
        Mat currentFrame;
        Image rawImage;
        Error error = camera.RetrieveBuffer(&rawImage);
        if (error != PGRERROR_OK)
        {
            cout << "capture error" << endl;
        }

        // convert to rgb
        Image rgbImage;
        Mat tempImage;
        rawImage.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage);

        // convert to OpenCV Mat
        unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize() / (double)rgbImage.GetRows();
        currentFrame = Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(), rowBytes);
        tempImage = currentFrame.clone();
        framesToProcess.push_back(tempImage);
    }


    getThresholdImage(framesToProcess, thresholdImages, differenceImages);


    cout << "Threshold image size" << thresholdImages.size();
    cout << "frames to process image size" << framesToProcess.size();


    getContourCenters(thresholdImages, contourCenters);


    getLEDPoints(LEDPoints, contourCenters); 

    cout << endl << "LED Points size is" << LEDPoints.size() << endl;

    Image rawImage;
    Error error = camera.RetrieveBuffer(&rawImage);
    if (error != PGRERROR_OK)
    {
        cout << "capture error" << endl;
    }

    // convert to rgb
    Image rgbImage;
    rawImage.Convert(FlyCapture2::PIXEL_FORMAT_BGR, &rgbImage);

    // convert to OpenCV Mat
    unsigned int rowBytes = (double)rgbImage.GetReceivedDataSize() / (double)rgbImage.GetRows();
    cameraFeed = Mat(rgbImage.GetRows(), rgbImage.GetCols(), CV_8UC3, rgbImage.GetData(), rowBytes);
    resize(cameraFeed, cameraFeed, Size(800, 600));

    if (LEDPoints.size() > 0)
    {

        /*if (LEDPoints.size > NUMBEROFLEDS)
        {
            vector<int> LEDCenters(NUMBEROFLEDS);

            kmeans(LEDPoints, NUMBEROFLEDS, labels,
                TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0),
                3, KMEANS_PP_CENTERS, LEDCenters);
        }*/

        for (int i = 0; i < LEDPoints.size(); ++i)
        {
            circle(cameraFeed, LEDPoints[i], 20, Scalar(0, 255, 0), 2);
        }
    }
    //++x;
    imshow("Camera Feed", cameraFeed);

    waitKey(1);
}

camera.Disconnect();

return(0);

}