Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

Should we use filters on colored images? Convolving filter performance.

I noticed there is great difference in performance if we use filters on grayscale and colored images. This is an example code

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>

using namespace cv;

int main( int argc, char** argv ) { Mat src, gray, dst, abs_dst; src = imread( "../../data/lena.jpg" ); if ( src.empty() ) return -1; /// Remove noise by blurring with a Gaussian filter

double t = (double) cv::getTickCount();
GaussianBlur( src, dst, Size(3,3), 0, 0, BORDER_DEFAULT );
cvtColor( dst, gray, CV_RGB2GRAY );
t = (double) 1000 * (cv::getTickCount() - t) / cv::getTickFrequency();
std::cout << "convertion + blur time: " << t << "ms" << std::endl;
imshow("blured->gray", gray);
waitKey(0);

t = (double) cv::getTickCount();
cvtColor( dst, gray, CV_RGB2GRAY );
GaussianBlur( gray, gray, Size(3,3), 0, 0, BORDER_DEFAULT );
t = (double) 1000 * (cv::getTickCount() - t) / cv::getTickFrequency();
std::cout << "convertion + blur time: " << t << "ms" << std::endl;
imshow("gray->blured", gray);
waitKey(0);
destroyWindow("blured->gray");
destroyWindow("gray->blured");

/// Apply Laplace function
t = (double) cv::getTickCount();
Laplacian( src, dst, CV_16S, 3, 1, 0, BORDER_DEFAULT );
convertScaleAbs( dst, abs_dst );
t = (double) 1000 * (cv::getTickCount() - t) / cv::getTickFrequency();
std::cout << "Laplacian time: " << t << "ms" << std::endl;
imshow( "Laplacian", abs_dst );

waitKey(0);
return 0;

}

So I wonder: why should we perform filters on RGB images at all? Should we or not? I have seen various effects on grayscale images, and you always convert the color image to grayscale first. But if it is faster to apply the filter on grayscale, could we just apply the filter on the grayscale image and then use some trick/effect that produces a similar change (blur/smoothing/emboss/edge effects) on the color image? I think it should be many times faster than applying kernels on color images — for example, using the grayscale result as some kind of "mask" that changes the look of the color image.

Should we use filters on colored images? Convolving filter performance.

I noticed there is great difference in performance if we use filters on grayscale and colored images. This is an example code

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>

using namespace cv;

cv; int main( int argc, char** argv ) { Mat src, gray, dst, abs_dst; src = imread( "../../data/lena.jpg" ); if ( src.empty() ) return -1; /// Remove noise by blurring with a Gaussian filter

filter
double t = (double) cv::getTickCount();
 GaussianBlur( src, dst, Size(3,3), 0, 0, BORDER_DEFAULT );
 cvtColor( dst, gray, CV_RGB2GRAY );
 t = (double) 1000 * (cv::getTickCount() - t) / cv::getTickFrequency();
 std::cout << "convertion + blur time: " << t << "ms" << std::endl;
 imshow("blured->gray", gray);
 waitKey(0);
 t = (double) cv::getTickCount();
 cvtColor( dst, gray, CV_RGB2GRAY );
 GaussianBlur( gray, gray, Size(3,3), 0, 0, BORDER_DEFAULT );
 t = (double) 1000 * (cv::getTickCount() - t) / cv::getTickFrequency();
 std::cout << "convertion + blur time: " << t << "ms" << std::endl;
 imshow("gray->blured", gray);
 waitKey(0);
 destroyWindow("blured->gray");
 destroyWindow("gray->blured");
 /// Apply Laplace function
 t = (double) cv::getTickCount();
 Laplacian( src, dst, CV_16S, 3, 1, 0, BORDER_DEFAULT );
 convertScaleAbs( dst, abs_dst );
 t = (double) 1000 * (cv::getTickCount() - t) / cv::getTickFrequency();
 std::cout << "Laplacian time: " << t << "ms" << std::endl;
 imshow( "Laplacian", abs_dst );
 waitKey(0);
 return 0;
}

}

So I wonder why should we perform filters on RGB images? Should we or not? I have seen various effects on grayscale images and always you convert color image to grayscale. But if it is faster to apply the filter on grayscale, could we just apply the filter on the grayscale image and then use some trick/effect that will do similar change (blur/smoothing/emboss/edges effects) to the colored images. I think like it should be many times faster than applying kernels on color images. I think like using grayscale image like some kind of "mask" or something what can change the look of the color image.