Ask Your Question

Noman's profile - activity

2018-10-13 15:36:24 -0600 received badge  Notable Question (source)
2017-12-10 08:58:05 -0600 received badge  Popular Question (source)
2015-12-05 13:43:56 -0600 asked a question How to display Video in opencv-Qt ?

After following some answers on Stack Overflow, I understood how to use a timer to display video in Qt. But still my program never even reaches the function that should be called repeatedly. I don't understand what I am doing wrong. Here's my code -

// File-scope state shared by the MainWindow slots below.
VideoCapture cap(0);   // default webcam (index 0), opened at static-initialization time
QTimer *imageTimer;    // frame timer; created and parented in the MainWindow constructor
Mat frame;             // latest frame grabbed by showVideo()

// Constructor: builds the UI and prepares (but does not start) the
// frame-grab timer that drives showVideo().
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
{
    ui.setupUi(this);

    const int imagePeriod = 1000 / 25;   // ms per frame at 25 fps
    imageTimer = new QTimer(this);       // parented to the window -> deleted by Qt
    imageTimer->setInterval(imagePeriod);

    // BUG FIX: use the Qt5 pointer-to-member overload. The string-based
    // SIGNAL()/SLOT() form is only checked at run time and fails *silently*
    // on any mismatch — the classic reason a slot "never gets called".
    // This form fails at compile time instead.
    connect(imageTimer, &QTimer::timeout, this, &MainWindow::showVideo);
}

// Destructor: intentionally empty — imageTimer is parented to this window
// and is released by Qt's parent-child ownership.
MainWindow::~MainWindow()
{

}

// Convert an OpenCV Mat to a QImage for display in a QLabel.
// Supports BGR color (CV_8UC3) and 8-bit grayscale (CV_8UC1) mats;
// any other type yields a null QImage.
QImage MainWindow::getQImage(cv::Mat &timage){

    // 256-entry grayscale palette for indexed 8-bit images, built once.
    static QVector<QRgb> colorTable;

    if (colorTable.isEmpty()){
        for (int i = 0; i < 256; i++){
            colorTable.push_back(qRgb(i, i, i));
        }
    }

    if (timage.type() == CV_8UC3){
        // Wrap the Mat's pixel buffer without copying. OpenCV stores BGR,
        // so swap to RGB; rgbSwapped() returns a deep copy, which also
        // detaches the result from the Mat's buffer.
        QImage temp = QImage((const unsigned char*)(timage.data), timage.cols, timage.rows, timage.step, QImage::Format_RGB888);
        return temp.rgbSwapped();
    }
    else if (timage.type() == CV_8UC1){
        // NOTE(review): this QImage still aliases timage's buffer — the Mat
        // must outlive the returned image (true here: `frame` is a global).
        QImage temp = QImage((const unsigned char*)(timage.data), timage.cols, timage.rows, timage.step, QImage::Format_Indexed8);
        temp.setColorTable(colorTable);
        return temp;
    }

    // BUG FIX: the original fell off the end of this non-void function for
    // any other Mat type — undefined behavior. Return a null image instead.
    return QImage();
}

// Start button: begin periodic frame grabbing.
void MainWindow::on_startButton_pressed(){
    // BUG FIX: start(10) silently overrode the 40 ms (25 fps) interval
    // configured in the constructor; start() honors the preset interval.
    imageTimer->start();
}

// Timer slot: grab one frame from the camera and display it in the
// videoFeed label. Fires every timer tick while the timer is running.
void MainWindow::showVideo(){

    // BUG FIX: guard against a failed grab (camera unplugged, not opened);
    // the original passed a possibly-empty Mat straight to getQImage().
    if (!cap.read(frame) || frame.empty())
        return;

    ui.videoFeed->setScaledContents(true);
    ui.videoFeed->setPixmap(QPixmap::fromImage(getQImage(frame)));
    ui.videoFeed->resize(ui.videoFeed->pixmap()->size());

    this->update();
}

// Stop button: halt the frame timer so showVideo() stops firing.
void MainWindow::on_stopButton_pressed(){
    imageTimer->stop();
}

This is the header file:

// Main application window: shows a live webcam feed in ui.videoFeed,
// driven by a QTimer that repeatedly calls showVideo().
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = 0);
    ~MainWindow();

    // Convert a cv::Mat (CV_8UC3 BGR or CV_8UC1 grayscale) to a QImage.
    // BUG FIX: members must not be declared with a qualified name
    // (`QImage MainWindow::getQImage(...)`) inside the class body — that is
    // ill-formed C++ (an MSVC extension, rejected by GCC/Clang).
    QImage getQImage(cv::Mat &timage);

private:
    Ui::MainWindowClass ui;

private slots:
    void on_startButton_pressed();
    void on_stopButton_pressed();

public slots:
    // BUG FIX: same qualified-name problem as above; also un-nested the
    // `public slots:` section so moc sees it at class-section level.
    void showVideo();
};
2015-11-08 05:06:40 -0600 received badge  Editor (source)
2015-11-08 05:04:39 -0600 asked a question What is the right/best way for Eye detection ?

I want to detect eyes inside a detected face in webcam footage. I am using the haarcascade eye and split-eye classifiers to detect the eyes. But the eye detection is very unstable: the detected rectangle is constantly rescaling, moving around, and half of the time nothing is detected at all. It's too unstable for aligning and cropping the face correctly. Am I doing something wrong, or is there a better way to detect eyes?

I am currently following the face-recognition recipe from the Mastering OpenCV book. Here's my code -

        Mat topLeftOfFace;
        Mat topRightOfFace;
        Mat unusedEye;      // dummy target for the eye half we are not refreshing

        // Get the left-eye and right-eye portions of the face image.
        getEyeRegions(biggestFace, "eye", topLeftOfFace, topRightOfFace, leftX, rightX, topY);

        // Detect the left-eye cascade.
        newScale = topLeftOfFace.cols;
        Rect leftEye = detectCascadeSingle(topLeftOfFace, eyeDetector, newScale);

        if (leftEye.width <= 0){
            // Whole-eye cascade found nothing; retry with the split-eye region.
            // BUG FIX: was Mat(0, 0, CV_8U) — a temporary cannot bind to the
            // non-const Mat& parameter; pass a named dummy Mat instead.
            getEyeRegions(biggestFace, "split_eye", topLeftOfFace, unusedEye, leftX, rightX, topY);
            newScale = topLeftOfFace.cols;
            leftEye = detectCascadeSingle(topLeftOfFace, leftEyeDetector, newScale);
        }

        Point leftEyeCenter = Point(-1, -1);

        // BUG FIX: was `>= 0`, which treated an empty (not-found) rectangle
        // as a hit; use `> 0` to match the right-eye check below.
        if (leftEye.width > 0){
            leftEyeCenter.x = leftEye.x + leftEye.width / 2 + leftX;
            leftEyeCenter.y = leftEye.y + leftEye.height / 2 + topY;
        }

        // Detect the right-eye cascade. (BUG FIX: the original comment here
        // ended in a backslash — a line continuation that silently spliced
        // the following source line into the comment.)
        newScale = topRightOfFace.cols;
        Rect rightEye = detectCascadeSingle(topRightOfFace, eyeDetector, newScale);

        if (rightEye.width <= 0){
            getEyeRegions(biggestFace, "split_eye", unusedEye, topRightOfFace, leftX, rightX, topY);
            newScale = topRightOfFace.cols;
            rightEye = detectCascadeSingle(topRightOfFace, rightEyeDetector, newScale);
        }

        Point rightEyeCenter = Point(-1, -1);

        if (rightEye.width > 0){
            rightEyeCenter.x = rightEye.x + rightEye.width / 2 + rightX;
            rightEyeCenter.y = rightEye.y + rightEye.height / 2 + topY;
        }

And the getEyeRegions function -

// Compute the left- and right-eye search regions inside a detected face.
// mode: "eye" for the whole-eye cascade regions, "split_eye" for the
//       (larger) split left/right eye cascade regions.
// Outputs: topLeftOfFace / topRightOfFace are ROIs into biggestFace (shared
// data, no copy); leftX / rightX / topY are the region origins in face
// coordinates so detected eye centers can be mapped back onto the face.
void getEyeRegions(Mat biggestFace, string mode, Mat &topLeftOfFace, Mat &topRightOfFace, int &leftX, int &rightX, int &topY){

    // Fractions of the face rectangle bounding the eye search regions
    // (values from the Mastering OpenCV face-recognition chapter).
    // BUG FIX: these were uninitialized, so any mode other than
    // "eye"/"split_eye" read indeterminate values. Default to the
    // whole-eye fractions and override for "split_eye".
    float EYE_SX = 0.16f;
    float EYE_SY = 0.26f;
    float EYE_SW = 0.30f;
    float EYE_SH = 0.28f;

    if (mode == "split_eye"){
        EYE_SX = 0.12f;
        EYE_SY = 0.17f;
        EYE_SW = 0.37f;
        EYE_SH = 0.36f;
    }

    leftX = cvRound(biggestFace.cols * EYE_SX);
    topY = cvRound(biggestFace.rows * EYE_SY);
    int widthX = cvRound(biggestFace.cols * EYE_SW);
    int heightY = cvRound(biggestFace.rows * EYE_SH);
    // Right region mirrors the left one about the face's vertical midline.
    rightX = cvRound(biggestFace.cols * (1.0 - EYE_SX - EYE_SW));

    topLeftOfFace = biggestFace(Rect(leftX, topY, widthX, heightY));
    topRightOfFace = biggestFace(Rect(rightX, topY, widthX, heightY));
}   // BUG FIX: the closing brace was missing from the original paste.

The detectCascadeSingle Function -

cv::Rect detectCascadeSingle(Mat img, CascadeClassifier faceDetector, int scaledWidth){
if (img.channels() == 3){
    cvtColor(img, img, CV_BGR2GRAY);
}
else if (img.channels() == 4){
    cvtColor(img, img, CV_BGRA2GRAY);
}
int DETECTION_WIDTH = scaledWidth;

float scale;
scale = img.cols / DETECTION_WIDTH;

if (img.cols > DETECTION_WIDTH){
    int scaledHeight = cvRound(img.rows / scale);
    resize(img, img, Size(DETECTION_WIDTH, scaledHeight));
}


//Image historgram equalization

Mat equalizedImg;

equalizeHist(img, img);


int flags = CASCADE_SCALE_IMAGE;

int biggestFlag = CASCADE_FIND_BIGGEST_OBJECT | CASCADE_DO_ROUGH_SEARCH;

Size minFeatureSize(5, 5);

float searchScaleFactor = 1.1f;

int minNeighbors = 4;

std::vector<Rect> faces;

faceDetector.detectMultiScale(img, faces, searchScaleFactor, minNeighbors, biggestFlag, minFeatureSize);


if (img.cols > DETECTION_WIDTH){
    for (int i = 0; i < (int)faces.size(); i++){
        faces[i].x = cvRound(faces[i].x * scale);
        faces[i].y = cvRound(faces[i].y * scale);
        faces[i].width = cvRound(faces[i].width * scale);
        faces[i].height = cvRound(faces[i].height * scale);
    }
}

//Keeping the face inside the border 

for (int i = 0; i < (int)faces.size(); i++){
    if (faces[i].x < 0){
        faces[i].x = 0;
    }
    if (faces[i].y ...
(more)
2015-10-21 10:34:36 -0600 received badge  Scholar (source)
2015-10-21 10:30:44 -0600 commented answer What is the alternative of getMat() of Facerecognizer class in opencv3.00 ?

Thank you very much. It worked.

2015-10-21 10:29:57 -0600 received badge  Supporter (source)
2015-10-21 10:16:36 -0600 commented answer What is the alternative of getMat() of Facerecognizer class in opencv3.00 ?

Thank you for your answer. But the same error as before - class cv::face::FaceRecognizer has no member "getEigenValues". I installed opencv just a month ago and downloaded opencv_contrib repo weeks ago. Do I need to update both and rebuild again?

2015-10-21 09:52:19 -0600 asked a question What is the alternative of getMat() of Facerecognizer class in opencv3.00 ?

I want to follow the tutorials of the face recognition with opencv. But in the tutorial source code example getMat() method is used, which is deprecated in opencv3.0. What can I use instead of getMat() now in opencv3.0?

This is the tutorial I am trying to follow - http://docs.opencv.org/3.0-beta/modul...