I've been looking at cap_v4l.cpp, specifically:
case V4L2_PIX_FMT_Y16:
{
    cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].start);
    cv::Mat(imageSize, CV_16UC1, currentBuffer.start).convertTo(temp, CV_8U, 1.0 / 256);
    cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
    return;
}
case V4L2_PIX_FMT_Y12:
{
    cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].start);
    cv::Mat(imageSize, CV_16UC1, currentBuffer.start).convertTo(temp, CV_8U, 1.0 / 16);
    cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
    return;
}
case V4L2_PIX_FMT_Y10:
{
    cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].start);
    cv::Mat(imageSize, CV_16UC1, currentBuffer.start).convertTo(temp, CV_8U, 1.0 / 4);
    cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
    return;
}
To me this looks like it converts the 16-bit (or 12-bit / 10-bit) data down to 8-bit before converting to BGR, and would therefore lose accuracy. Am I correct in thinking this? Should this code be using CV_16U instead?
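To illustrate what I mean, here is a small self-contained sketch (not the cap_v4l.cpp code; the 1x2 test frame and the values in it are made up for the example): scaling a 16-bit grey frame down to 8 bits collapses nearby intensities onto the same value, while cvtColor applied to the 16-bit frame directly keeps the full depth:

// Standalone illustration, not the cap_v4l.cpp code: compares the current
// "scale to 8 bits, then GRAY2BGR" path with a depth-preserving alternative.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main()
{
    // Fake 1x2 "Y16" frame holding two distinct 16-bit intensities.
    cv::Mat y16 = (cv::Mat_<ushort>(1, 2) << 900, 1100);

    // Current behaviour: divide by 256 into an 8-bit image, then expand to BGR.
    cv::Mat y8, bgr8;
    y16.convertTo(y8, CV_8U, 1.0 / 256);          // 900 -> 4, 1100 -> 4: both collapse to the same value
    cv::cvtColor(y8, bgr8, cv::COLOR_GRAY2BGR);   // result is CV_8UC3

    // Alternative: cvtColor accepts 16-bit single-channel input,
    // so the full depth can be kept in a CV_16UC3 output.
    cv::Mat bgr16;
    cv::cvtColor(y16, bgr16, cv::COLOR_GRAY2BGR); // values stay 900 and 1100

    std::printf("8-bit BGR:  %d %d\n", bgr8.at<cv::Vec3b>(0, 0)[0], bgr8.at<cv::Vec3b>(0, 1)[0]);
    std::printf("16-bit BGR: %d %d\n", bgr16.at<cv::Vec3w>(0, 0)[0], bgr16.at<cv::Vec3w>(0, 1)[0]);
    return 0;
}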