Revision history

revision 1: initial version

How can I add and load a data file to train the SVM classifier?

I read a PDF that uses the FG-NET dataset to train an SVM. The FG-NET Facial Expressions and Emotions Database consists of MPEG video files with recorded spontaneous emotions, gathered from 18 subjects (9 female and 9 male). The proposed system was trained on captured video frames in which the displayed emotion is very representative. The training set consists of 675 images covering seven states: neutral plus six emotions (surprise, fear, disgust, sadness, happiness and anger). I have both the video files and the image files: which of them should I use for training and which for testing?
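For what it is worth, the code below already contains every step of that pipeline: captureSnapshots() extracts still frames from the video files, MultiTrain::createDataSet() builds the LBP feature matrix from frame images whose file names carry the emotion tag (*neutr*, *happy*, *sad*, *surpr*, *ang*, *fear*, *disg*), trainModel() saves emo_svm_model.xml, and performCrossValidation()/testModel() evaluate the model. So the videos are only a source of frames; training and testing both run on the extracted images, and the split is made at the image level (for example by holding out whole clips or subjects). A minimal sketch of how the calls could be chained, e.g. called from main() instead of playDemo(); the directory names and the 5-fold split are my own assumptions, not something stated in the post:

// hypothetical training driver built only from the functions posted below;
// all paths are assumptions, adjust them to your own layout
#include "modules.hpp"
#include "tasks.hpp"

void trainFromVideos()
{
    // 1) extract still frames from the MPEG videos (every 10th frame here)
    captureSnapshots("../videos", "../datasets/FEED/images", 10, "*.mpg");
    // 2) build the LBP feature matrix + labels from the extracted frames
    //    and write them to emotrainset.xml (this is the "data file")
    MultiTrain mt;
    mt.createDataSet("../datasets/FEED/images", "../datasets/FEED");
    // 3) load the saved data file and train the SVM -> emo_svm_model.xml
    mt.loadDataSet("../datasets/FEED/emotrainset.xml");
    mt.trainModel("../datasets/FEED");
    // 4) estimate accuracy on held-out folds rather than on the training images
    mt.performCrossValidation(5);
}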

modules.hpp
/**
*
* header file for 3 modules of FER system
* Face detection
* Expression description
* Training & Recognition
*/
#ifndef modules_hpp
#define modules_hpp
#include "helpers.hpp"
#include "cv.h"
#include "highgui.h"
#include "ml.h"
using namespace cv;
class Landmark
{
public:
Landmark();
Landmark(CvRect box);
CvRect bbox;
bool isEmpty();
int getX();
int getY();
CvRect getRect();
};
class Eye : public Landmark
{
public:
Eye(){}
Eye(CvRect box){}
~Eye(){}
};
class Eyebrow : public Landmark
{
public:
CvPoint left;
CvPoint center;
CvPoint right;
Eyebrow(){}
Eyebrow(CvRect box){}
~Eyebrow(){}
};
class Mouth : public Landmark
{
public:
CvPoint left;
CvPoint center;
CvPoint upperCenter;
CvPoint lowerCenter;
CvPoint right;
Mouth(){}
Mouth(CvRect box){}
~Mouth(){}
};
class Face : public Landmark
{
public:
CvRect bbox;
CvRect upperface;
CvRect lowerface;
Eye lefteye;
Eye righteye;
Mouth mouth;
Eyebrow lefteyebrow;
Eyebrow righteyebrow;
/**methods**/
Face(){}
Face(CvRect box){}
~Face(){}
void drawBox(IplImage* image, CvRect box);
void drawPoints(IplImage *image);
void drawElements(IplImage *image);
};
/********detector***********/
class FaceDetection
{
public:
Face face;
private:
CvMemStorage *buffer;
CvHaarClassifierCascade *faceCascade, *reyeCascade, *leyeCascade, *mouthCascade;
CvPoint currentROIlocation;
IplImage *image;
public:
FaceDetection(IplImage *image);
~FaceDetection();
Face getFace();
IplImage* getImage();
void calculatePoints();
bool detectElements();
void setRegions();
private:
bool detectFace();
void detectEyes();
CvRect setBrow(CvRect box);
void detectMouth();
void setEyebrows();
void setAbsoluteCoordinates(CvRect &r);
void setAbsoluteCoordinates(CvPoint &p);
void setCurrentROIlocation(int x, int y);
CvSeq* getMax(CvSeq * contours, double boxarea);
};
/*********extraction**********/
class FeatureExtraction
{
public:
FeatureExtraction();
FeatureExtraction(IplImage* upper, IplImage* lower);
~FeatureExtraction();
IplImage *upper;
IplImage *lower;
int indx;
float feature_vector[36*256];
public:
void normalize(IplImage* upper, IplImage* lower);
void calculateLBP();
void setLBPGrid(IplImage *img, int width, int height);
};
/****Multiclass Training****/
class MultiTrain
{
public:
MultiTrain();
~MultiTrain();
CvSVM SVM;
CvMat *trainData;
CvMat *labels;
CvTermCriteria criteria;
CvSVMParams params;
double ACC;
void createDataSet(string inputdir, string outputdir);
void loadDataSet(string filename);
void trainModel(string outputdir);
void loadModel(string filename);
int getPrediction(IplImage *image);
void testModel(string filename);
void createConfusionMatrix(string filename);
void calculateTrainDataCount(int tab[]);
void prepareSets(int class_counts[], int counts[], int part, int parts, CvMat *traindata, CvMat *trainlabels, CvMat *testdata, CvMat *testlabels);
void performCrossValidation(int parts);
private:
void processData(string path, int i);
};
/**cvSeq comparison function**/
static int comp_func(const void* _a, const void* _b, void* userdata);
static int comp_func_x(const void* _a, const void* _b, void* userdata);
#endif
modules.cpp
/**
* author: 
*/
#include "helpers.hpp"
#include "modules.hpp"
#include "lbp.hpp"
/****landmark****/
Landmark::Landmark()
{
this->bbox = cvRect(0,0,0,0);
}
Landmark::Landmark(CvRect box)
{
this->bbox = box;
}
bool Landmark::isEmpty()
{
if(this->bbox.height == 0 || this->bbox.width == 0)
return true;
return false;
}
int Landmark::getX()
{
return this->bbox.x;
}
int Landmark::getY()
{
return this->bbox.y;
}
CvRect Landmark::getRect()
{
return this->bbox;
}
/******Face******/
void Face::drawBox(IplImage* image, CvRect box)
{
cvRectangle(image, cvPoint(box.x, box.y), cvPoint(box.x+box.width, box.y+box.height), CV_RGB(255,0,0), 1, 8, 0);
}
void Face::drawElements(IplImage *image)
{
this->drawBox(image, this->bbox);
this->drawBox(image, this->lefteye.bbox);
this->drawBox(image, this->righteye.bbox);
this->drawBox(image, this->mouth.bbox);
}
/***********Face Detection*******************/
FaceDetection::FaceDetection(IplImage *image)
{
this->buffer = cvCreateMemStorage(0);
char *face = "../haarcascades/haarcascade_frontalface_default.xml";
char *eye_left = "../haarcascades/haarcascade_mcs_lefteye.xml";
char *eye_right = "../haarcascades/haarcascade_mcs_righteye.xml";
char *mouth = "../haarcascades/haarcascade_mcs_mouth.xml";
this->faceCascade = ( CvHaarClassifierCascade* )cvLoad( face, 0, 0, 0);
this->leyeCascade = ( CvHaarClassifierCascade* )cvLoad( eye_left, 0, 0, 0);
this->reyeCascade = ( CvHaarClassifierCascade* )cvLoad( eye_right, 0, 0, 0);
this->mouthCascade = ( CvHaarClassifierCascade* )cvLoad( mouth, 0, 0, 0);
this->currentROIlocation = cvPoint(0,0);
this->image = image;
this->face = Face();
}
FaceDetection::~FaceDetection()
{
cvReleaseHaarClassifierCascade( &faceCascade);
cvReleaseHaarClassifierCascade( &leyeCascade);
cvReleaseHaarClassifierCascade( &reyeCascade);
cvReleaseHaarClassifierCascade( &mouthCascade);
cvReleaseMemStorage( &buffer);
}
Face FaceDetection::getFace()
{
return this->face;
}
IplImage* FaceDetection::getImage()
{
return this->image;
}
bool FaceDetection::detectElements()
{
if(! this->detectFace()) return false;
this->detectEyes();
this->detectMouth();
this->setEyebrows();
this->setCurrentROIlocation(0,0);
return true;
}
bool FaceDetection::detectFace()
{
CvSeq *faces = cvHaarDetectObjects(this->image, faceCascade, buffer, 1.1, 3, 0, cvSize(30,30));
if(!faces->total) return false;
else
{
/**get the biggest detected face**/
cvSeqSort(faces, comp_func, 0);
CvRect *r = (CvRect*) cvGetSeqElem(faces, 0);
this->face.bbox = *r;
cvClearMemStorage(this->buffer);
}
return true;
}
void FaceDetection::detectEyes()
{
/*left eye*/
cvSetImageROI(this->image, cvRect(this->face.bbox.x, this->face.bbox.y, this->face.bbox.width/2, (this->face.bbox.height*2/3)));
this->setCurrentROIlocation(this->face.bbox.x, this->face.bbox.y);
CvSeq *eyes = cvHaarDetectObjects(this->image, this->leyeCascade, this->buffer, 1.1, 3,0, cvSize(5,5));
cvSeqSort(eyes, comp_func, 0);
if( eyes->total != 0)
{
CvRect *left = (CvRect*) cvGetSeqElem( eyes, 0);
this->setAbsoluteCoordinates(*left);
this->face.lefteye.bbox = *left;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
/*right eye*/
cvSetImageROI(this->image, cvRect(this->face.bbox.x+(this->face.bbox.width/2), this->face.bbox.y, this->face.bbox.width/2, (this->face.bbox.height*2/3)));
this->setCurrentROIlocation(this->face.bbox.x+(this->face.bbox.width/2), this->face.bbox.y);
CvSeq *reyes = cvHaarDetectObjects(this->image, this->reyeCascade, this->buffer, 1.1, 3,0, cvSize(5,5));
cvSeqSort(reyes, comp_func, 0);
if( reyes->total != 0)
{
CvRect *right = (CvRect*) cvGetSeqElem( reyes, 0);
this->setAbsoluteCoordinates(*right);
this->face.righteye.bbox = *right;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
}
void FaceDetection::detectMouth()
{
cvSetImageROI(image, cvRect(this->face.bbox.x, this->face.bbox.y+(this->face.bbox.height/2), this->face.bbox.width, (this->face.bbox.height/2)));
setCurrentROIlocation(this->face.bbox.x, this->face.bbox.y+(this->face.bbox.height/2));
CvSeq *mouth = cvHaarDetectObjects(image, mouthCascade, buffer, 1.1, 3,0, cvSize(1,1));
if(mouth->total)
{
cvSeqSort(mouth, comp_func, 0);
CvRect *r = (CvRect*) cvGetSeqElem( mouth, 0);
this->setAbsoluteCoordinates(*r);
this->face.mouth.bbox = *r;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
}
void FaceDetection::setEyebrows()
{
this->face.lefteyebrow.bbox = this->setBrow(this->face.lefteye.bbox);
this->face.righteyebrow.bbox = this->setBrow(this->face.righteye.bbox);
}
CvRect FaceDetection::setBrow(CvRect box)
{
int x = box.x - box.width/3;
int y = box.y - box.height*3/2;
int width = box.width*5/3;
int height = box.height*2;
return cvRect(x, y, width, height);
}
void FaceDetection::setRegions()
{
//upper
int x = this->face.lefteyebrow.bbox.x;
int y = this->face.lefteyebrow.bbox.y;
int width = this->face.lefteyebrow.bbox.width + this->face.righteyebrow.bbox.width;
int height = this->face.lefteyebrow.bbox.height + this->face.lefteye.bbox.height;
this->face.upperface = cvRect(x, y, width, height);
//lower
x = this->face.lefteye.bbox.x;
width = (this->face.righteye.bbox.x+this->face.righteye.bbox.width) - this->face.lefteye.bbox.x;
y = this->face.mouth.bbox.y - this->face.mouth.bbox.height/2;
height = 2*this->face.mouth.bbox.height;
this->face.lowerface = cvRect(x,y,width,height);
}
void FaceDetection::setCurrentROIlocation(int x, int y)
{
currentROIlocation.x = x;
currentROIlocation.y = y;
}
void FaceDetection::setAbsoluteCoordinates(CvRect &r)
{
r.x += currentROIlocation.x;
r.y += currentROIlocation.y;
}
void FaceDetection::setAbsoluteCoordinates(CvPoint &p)
{
p.x += currentROIlocation.x;
p.y += currentROIlocation.y;
}
/***********feature extraction ***************/
FeatureExtraction::FeatureExtraction()
{
this->upper = 0;
this->lower = 0;
this->indx = 0;
}
FeatureExtraction::FeatureExtraction(IplImage* upper, IplImage* lower)
{
this->normalize(upper, lower);
}
FeatureExtraction::~FeatureExtraction()
{
if(this->upper) cvReleaseImage(&this->upper);
if(this->lower) cvReleaseImage(&this->lower);
}
void FeatureExtraction::normalize(IplImage* upper, IplImage* lower)
{
this->upper = cvCreateImage(cvSize(90,48), upper->depth, upper->nChannels);
this->lower = cvCreateImage(cvSize(72,48), lower->depth, lower->nChannels);
cvResize(upper, this->upper);
cvResize(lower, this->lower);
}
void FeatureExtraction::calculateLBP()
{
this->indx = 0;
this->setLBPGrid(this->lower, 18, 12);
this->setLBPGrid(this->upper, 18, 12);
}
void FeatureExtraction::setLBPGrid(IplImage *img, int width, int height)
{
for(int i=0; i< (img->width/width); i++)
for(int j=0; j< (img->height/height); j++)
{
LBP lbp;
cvSetImageROI(img, cvRect(i*width,j*height, width, height));
lbp.createLBP(img);
lbp.histogram();
lbp.fillFeatureSet(this->feature_vector, this->indx);
this->indx+=256;
//cvRectangle(img, cvPoint(0,0), cvPoint(width, height), cvScalar(255,0,0));
cvResetImageROI(img);
}
}
/******MulticlassTraining****/
MultiTrain::MultiTrain()
{
this->trainData = 0;
this->labels = 0;
this->params = CvSVMParams();
this->params.term_crit.epsilon = 1.0000000116860974e-007;
this->params.term_crit.type = CV_TERMCRIT_EPS;
this->params.svm_type = CvSVM::C_SVC;
this->params.kernel_type = CvSVM::RBF;
this->params.gamma = 3.0000000000000001e-006;
this->params.C = 20;
}
MultiTrain::~MultiTrain()
{
if(this->trainData) cvReleaseMat(&this->trainData);
if(this->labels) cvReleaseMat(&this->labels);
}
void MultiTrain::createDataSet(string inputdir, string outputdir)
{
/*
0 neutral
1 happiness
2 sadness
3 surprise
4 anger
5 fear
6 disgust
*/
vector<string> neutral = vector<string>();
vector<string> happy = vector<string>();
vector<string> sad = vector<string>();
vector<string> surprise = vector<string>();
vector<string> angry = vector<string>();
vector<string> fear = vector<string>();
vector<string> disgust = vector<string>();
listFiles(inputdir, "*neutr*", neutral);
listFiles(inputdir, "*happy*", happy);
listFiles(inputdir, "*sad*", sad);
listFiles(inputdir, "*surpr*", surprise);
listFiles(inputdir, "*ang*", angry);
listFiles(inputdir, "*fear*", fear);
listFiles(inputdir, "*disg*", disgust);
int count = (int)(neutral.size()+happy.size()+sad.size()+surprise.size()+angry.size()+fear.size()+disgust.size());
cout<<count<<endl;
this->trainData = cvCreateMat(count, 36*256, CV_32FC1);
this->labels = cvCreateMat(count, 1, CV_32SC1);
cvZero(this->trainData);
cvZero(this->labels);
int j=0;
for(int i=0; i< (int)neutral.size(); i++)
{
cout<<"processing image # "<<i<<endl;
this->processData(inputdir+"/"+neutral[i], i);
CV_MAT_ELEM(*this->labels, int, i,0) = 0;
}
j += (int) neutral.size();
for(int i=0; i< (int)happy.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+happy[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 1;
}
j += (int) happy.size();
for(int i=0; i< (int)sad.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+sad[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 2;
}
j += (int) sad.size();
for(int i=0; i< (int)surprise.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+surprise[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 3;
}
j += (int) surprise.size();
for(int i=0; i< (int)angry.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+angry[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 4;
}
j += (int) angry.size();
for(int i=0; i< (int)fear.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+fear[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 5;
}
j += (int) fear.size();
for(int i=0; i< (int)disgust.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+disgust[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0)= 6;
}
CvFileStorage *file = cvOpenFileStorage((outputdir+"/emotrainset.xml").c_str(), 0, CV_STORAGE_WRITE);
cvWrite(file, "dataset", this->trainData);
cvWrite(file, "labels", this->labels);
cvReleaseFileStorage(&file);
}
void MultiTrain::loadDataSet(string filename)
{
CvFileStorage* file = cvOpenFileStorage(filename.c_str(), 0, CV_STORAGE_READ);
this->trainData = (CvMat*)cvRead(file, cvGetFileNodeByName(file,0, "dataset"));
this->labels = (CvMat*) cvRead(file, cvGetFileNodeByName(file,0, "labels"));
cvReleaseFileStorage(&file);
}
void MultiTrain::trainModel(string outputdir)
{
cout<<"Training the SVM classifier......"<<endl;
SVM.train(this->trainData, this->labels, 0,0,this->params);
SVM.save((outputdir+"/emo_svm_model.xml").c_str());
cout<<"SVM model saved to file: "<<"emo_svm_model.xml"<<endl;
}
void MultiTrain::loadModel(string filename)
{
this->SVM.load(filename.c_str());
}
void MultiTrain::calculateTrainDataCount(int tab[])
{
for(int i=0; i<this->labels->rows; i++)
{
tab[CV_MAT_ELEM(*this->labels, int, i,0)]++;
}
}
void MultiTrain::prepareSets(int class_counts[], int counts[], int part, int parts, CvMat *traindata, CvMat *trainlabels, CvMat *testdata, CvMat *testlabels)
{
int class_integral[7] = {0};
for(int i=1; i<7; i++)
{
class_integral[i] = calcSum(class_counts,0,i-1);
}
int test_iter, train_iter;
test_iter = train_iter = 0;
int type = -1;
for(int i=0; i<this->trainData->rows; i++)
{
if(i < (class_integral[0]+class_counts[0]))
type = 0;
else if(i < (class_integral[1]+class_counts[1]))
type = 1;
else if(i < (class_integral[2]+class_counts[2]))
type = 2;
else if(i < (class_integral[3]+class_counts[3]))
type = 3;
else if(i < (class_integral[4]+class_counts[4]))
type = 4;
else if(i < (class_integral[5]+class_counts[5]))
type = 5;
else if(i < (class_integral[6]+class_counts[6]))
type = 6;
if(type>=0)
{
// use a row header instead of allocating a new matrix for every row (avoids a leak)
CvMat r_hdr;
CvMat *r = cvGetRow(this->trainData, &r_hdr, i);
if((i >= (part*counts[type]+class_integral[type])) && (i < ((part+1)*counts[type]+class_integral[type])))
{
for(int j= 0; j<r->cols; j++)
{
CV_MAT_ELEM(*testdata, float, test_iter, j) = r->data.fl[j];
CV_MAT_ELEM(*testlabels, int, test_iter, 0) = type;
}
test_iter++;
}
else
{
for(int j= 0; j<r->cols; j++)
{
CV_MAT_ELEM(*traindata, float, train_iter, j) = r->data.fl[j];
CV_MAT_ELEM(*trainlabels, int, train_iter, 0) = type;
}
train_iter++;
}
}
}
}
void MultiTrain::performCrossValidation(int parts)
{
vector<double> test_results;
int class_counts[7] = {0};
int counts[7] = {0};
if( parts !=0)
{
calculateTrainDataCount(class_counts);
for(int i=0; i<7; i++)
{
counts[i] = (class_counts[i]/parts);
}
}
if(parts == 0 || parts == 1) cout<<"Cross validation cannot be performed for such input values"<<endl;
else if(calcSum(counts, 7)<7) cout<<"The database is too small for performing the "<<parts<<"-fold cross validation."<<endl;
else
{
//MAIN LOOP
for(int p=0; p<parts; p++)
{
//CREATE SETS
CvMat *traindata = 0;
CvMat *trainlabels = 0;
CvMat *testdata = 0;
CvMat *testlabels = 0;
testdata = cvCreateMat(calcSum(counts, 7), 36*256, CV_32FC1);
testlabels = cvCreateMat(calcSum(counts, 7),1, CV_32SC1);
traindata = cvCreateMat(this->trainData->rows - calcSum(counts, 7), 36*256, CV_32FC1);
trainlabels = cvCreateMat(this->trainData->rows - calcSum(counts,7), 1, CV_32SC1); 
//PREPARE SETS
this->prepareSets(class_counts, counts, p, parts, traindata, trainlabels, testdata, testlabels);
//PERFORM TRAINING
cout<<"Training the SVM classifier...part#"<<p<<endl;
SVM.train(traindata, trainlabels, 0,0, this->params);
//PERFORM TESTING
// evaluate on the held-out fold (testdata), not on the data the SVM was just trained on
int TP = 0;
for(int i=0; i<(int)testdata->rows; i++)
{
CvMat row_hdr;
CvMat *row = cvGetRow(testdata, &row_hdr, i);
int res = (int)this->SVM.predict(row);
if(res == testlabels->data.i[i])
TP++;
}
double accuracy = (double)TP/(double)testdata->rows;
cout<<"accuracy for part#"<<p<<" : "<<accuracy<<endl;
//RELEASE SETS
cvReleaseMat(&traindata);
cvReleaseMat(&trainlabels);
cvReleaseMat(&testdata);
cvReleaseMat(&testlabels);
this->SVM.clear();
cout<<"-----------------------------------------"<<endl;
}
}
}
int MultiTrain::getPrediction(IplImage *image)
{
int res = 0;
FaceDetection fd(image);
if( !fd.detectElements()) return -3;
fd.setRegions();
Face f = fd.getFace();
cvSetImageROI(image, f.upperface);
IplImage *up = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, up, 0);
cvResetImageROI(image);
cvSetImageROI(image, f.lowerface);
IplImage *lo = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, lo, 0);
cvResetImageROI(image);
FeatureExtraction fe(up, lo);
fe.calculateLBP();
CvMat *mat = cvCreateMat(1, 36*256, CV_32FC1);
//cvInitMatHeader(mat, 1, 36*256, CV_32FC1, fe.feature_vector);
for(int i=0; i<mat->cols; i++)
mat->data.fl[i] = fe.feature_vector[i];
res = (int) this->SVM.predict(mat);
cvReleaseMat(&mat);
return res;
}
void MultiTrain::testModel(string filename)
{
this->loadDataSet(filename);
int TP = 0; //true prediction counter
for(int i=0; i<(int)this->trainData->rows; i++)
{
CvMat row_hdr;
CvMat *row = cvGetRow(this->trainData, &row_hdr, i); // row header, no per-row allocation
int res = (int)this->SVM.predict(row);
if(res == this->labels->data.i[i])
TP++;
}
cout<<TP<<endl;
this->ACC = (double)TP/(double)this->trainData->rows;
cout<<"accuracy:"<<this->ACC<<endl;
}
void MultiTrain::createConfusionMatrix(string filename)
{
int confusionMatrix[7][7]= {0};
int overall[7] = {0};
int TP = 0;
this->loadDataSet(filename);
for(int i=0; i<(int)this->trainData->rows; i++)
{
CvMat row_hdr;
CvMat *row = cvGetRow(this->trainData, &row_hdr, i); // row header, no per-row allocation
int res = (int)this->SVM.predict(row);
confusionMatrix[this->labels->data.i[i]][res]++;
if(res == this->labels->data.i[i])
TP++;
}
cout<<"-------------------------------------"<<endl;
cout<<"----------confusion matrix-----------"<<endl;
for(int i=0; i<7; i++)
{
overall[i] = calcSum(confusionMatrix[i], 7);
for(int j=0; j<7; j++)
{
double perc = (double)confusionMatrix[i][j]*100/(double)overall[i];
cout<<perc<<"\t";
}
cout<<endl;
}
cout<<"-------------------------------------"<<endl;
this->ACC = (double)TP/(double)this->trainData->rows;
cout<<"accuracy:"<<this->ACC<<endl;
}
void MultiTrain::processData(string path, int i)
{
//(inputdir+"/"+filenames[i]).c_str()
IplImage *image = cvLoadImage(path.c_str(), 1);
FaceDetection detector(image);
if( detector.detectElements())
{
detector.setRegions();
Face f = detector.getFace();
cvSetImageROI(image, f.upperface);
IplImage *up = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, up, 0);
cvResetImageROI(image);
cvSetImageROI(image, f.lowerface);
IplImage *lo = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, lo, 0);
cvResetImageROI(image);
FeatureExtraction fe(up, lo);
fe.calculateLBP();
for(int j=0; j<36*256; j++)
{
this->trainData->data.fl[i*36*256+j] = fe.feature_vector[j];
}
cvReleaseImage(&up);
cvReleaseImage(&lo);
}
cvReleaseImage(&image);
}
/**cvSeq comparison function**/
static int comp_func(const void* _a, const void* _b, void* userdata)
{
CvRect * a = (CvRect*)_a;
CvRect * b = (CvRect*)_b;
int areaA = a->width * a->height;
int areaB = b->width * b->height;
if(areaA < areaB) return 1;
else return -1;
}
static int comp_func_x(const void* _a, const void* _b, void* userdata)
{
CvRect * a = (CvRect*)_a;
CvRect * b = (CvRect*)_b;
if(a->x > b->x) return 1;
else return -1;
}
lbp.hpp
/**
* author:
* class for LBP encoding
*/
#include "modules.hpp"
#include "cxcore.h"
class LBP
{
public:
LBP();
public:
~LBP();
IplImage* image;
IplImage* LBPimage;
CvHistogram* hist;
public:
/* calculate LBP features */
void createLBP(IplImage *patch);
/* create histogram of LBP features */
void histogram();
/* copy histogram to feature set */
void fillFeatureSet(float *set, int start_indx); // no LBP:: qualifier inside the class declaration
};
lbp.cpp
/**
* author: 
*/
#include "lbp.hpp"
LBP::LBP()
{
image =0;
LBPimage =0;
hist =0;
}
LBP::~LBP()
{
if(image) cvReleaseImage(&image);
if(LBPimage) cvReleaseImage(&LBPimage);
}
void LBP::createLBP(IplImage* patch)
{
IplImage* temp_image = cvCreateImage(cvGetSize(patch), patch->depth, patch->nChannels);
cvCopy(patch, temp_image);
image = cvCreateImage(cvSize(temp_image->width, temp_image->height), 8, 1);
if(temp_image->nChannels == 3)
{
cvCvtColor(temp_image, image, CV_BGR2GRAY);
}
else
{
cvCopy(temp_image, image); // patch is already single-channel, copy it as-is
}
LBPimage = cvCreateImage(cvSize(image->width, image->height), 8, 1);
int center=0;
int center_lbp=0;
for(int row=1; row<image->height-1; row++)
{
for(int col=1; col<image->width-1; col++)
{
center = cvGetReal2D(image, row, col);
center_lbp = 0; 
if(center >= cvGetReal2D(image, row-1, col-1)) 
{
center_lbp += 1; 
}
if(center >= cvGetReal2D(image, row-1, col)) 
{
center_lbp += 2; 
}
if(center >= cvGetReal2D(image, row-1, col+1)) 
{
center_lbp += 4; 
}
if(center >= cvGetReal2D(image, row, col-1)) 
{
center_lbp += 8; 
}
if(center >= cvGetReal2D(image, row, col+1)) 
{
center_lbp += 16; 
}
if(center >= cvGetReal2D(image, row+1, col-1)) 
{
center_lbp += 32; 
}
if(center >= cvGetReal2D(image, row+1, col)) 
{
center_lbp += 64; 
}
if(center >= cvGetReal2D(image, row+1, col+1)) 
{
center_lbp += 128; 
}
cvSetReal2D(LBPimage, row, col, center_lbp); 
}
}
cvReleaseImage(&temp_image); 
}
void LBP::histogram()
{
int bins = 256;
int hsize[] = {bins};
float range[] = {0,256};
float* ranges[] = {range};
float min_value = 0, max_value = 0;
IplImage * planes[] = {this->LBPimage};
this->hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges, 1);
cvCalcHist(planes, this->hist, 0,0);
}
void LBP::fillFeatureSet(float *set, int start_indx)
{
for(int i=0; i<256; i++)
{
set[i+start_indx] = cvQueryHistValue_1D(hist, i);
}
}
helpers.hpp
/**
* author: 
* set of different functions
*/
#ifndef helpers_hpp
#define helpers_hpp
#include <vector>
#include <string>
#include <sstream>
#include <fstream>
#include <math.h>
#include <iostream>
using namespace std;
/* function for listing files that match the pattern from directory*/
void listFiles(string directory, string pattern, vector<string> &files);
/* function for concatenating strings with integers */
string createSname(string path, string fname, string f, int indx, string ext);
/* function for concatenating strings - creating temporary file names */
string createFname(string path, string fname, string ext);
/* function for translating emotion codes */
string showResult(int code);
/* function for summing the array values*/
int calcSum(int *tab, int n);
/* function for summing the array values from indx1 to indx2*/
int calcSum(int *tab, int idx1, int idx2);
#endif
helpers.cpp
/**
* author: 
*/
#include "helpers.hpp"
#include "modules.hpp"
// Windows-specific: shells out to the dir command and reads the listing back from temp.txt
void listFiles(string directory, string pattern, vector<string> &files)
{
string command = "dir "+directory+"\\"+pattern+" /B > temp.txt";
string d;
system(command.c_str());
ifstream in;
in.open("temp.txt", ifstream::in);
if( in.is_open())
{
while(true)
{
if(!(in>>d)) break;
files.push_back(d);
}
in.close();
}
}
string createSname(string path, string fname, string f, int indx, string ext)
{
//path + fname + frame + indx + ext
string t;
stringstream s;
s<< indx;
s>> t;
string result = path+fname+f+t+ext;
return result;
}
string createFname(string path, string fname, string ext)
{
string result= path+fname+ext;
return result;
}
string showResult(int code)
{
string result="";
switch(code)
{
case 0:
result = "neutral";
break;
case 1:
result = "happy";
break;
case 2:
result = "sad";
break;
case 3:
result = "surprised";
break;
case 4:
result = "angry";
break;
case 5:
result = "fear";
break;
case 6:
result = "disgusted";
break;
}
return result;
}
int calcSum(int *tab, int n)
{
int sum = 0;
for(int i = 0; i < n; i++)
sum += tab[i];
return sum;
}
int calcSum(int *tab, int idx1, int idx2)
{
int sum = 0;
for(int i = idx1; i <= idx2; i++)
sum += tab[i];
return sum;
}
tasks.hpp
/**
* author 
* functions for performing particular tasks
*/
#ifndef tasks_hpp
#define tasks_hpp
#include "modules.hpp"
/** show image **/
void show(IplImage *im);
/** sample emotion recognition **/
void sample(string filename, string type); // matches the two-argument definition in tasks.cpp
/** perform face detection **/
void processDetection(string inputdir, string outputdir, string pattern);
/** capture snapshots (frames) from a video file **/
void captureSnapshots(string inputdir, string outputdir, int rate, string pattern);
/*****DEMO****/
class Demo
{
public:
Demo(string filename, string type);
~Demo();
vector<int> predictions;
string videofile;
string type;
void processVideo();
void displayPredictions(bool save);
void getStatistics();
};
/************/
#endif
tasks.cpp
/**
* author: 
*/
#include "tasks.hpp"
/*********************DEMO***********************************/
Demo::Demo(string filename, string type)
{
this->videofile = filename;
this->predictions = vector<int>();
this->type = type;
}
Demo::~Demo(){}
void Demo::processVideo()
{
CvCapture * capture = cvCaptureFromAVI(this->videofile.c_str());
cout<<"Loading classifier...."<<endl;
MultiTrain mt;
string path = "..\\datasets\\"+type+"\\";
mt.loadModel((path+"emo_svm_model.xml"));
int fps = ( int)cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
if( !capture)
{
cout<<"problems with avi file"<<endl;
}
else
{
cout<<"Processing video...."<<endl;
IplImage *frame=0;
int k = 0;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame)  break;
if( k%10 == 0)
{
int res = (int)mt.getPrediction(frame);
cout<<"Prediction for frame #"<<k<<" => "<<res<<endl;
this->predictions.push_back(res);
}
k++;
}
cvReleaseImage(&frame);
}
cvReleaseCapture(&capture);
}
void Demo::displayPredictions(bool save)
{
CvCapture * capture = cvCaptureFromAVI(this->videofile.c_str());
CvVideoWriter *writer = 0;
int isColor = 1;
int fps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
int frameWidth = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
int frameHeight = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
if(save)
{
writer = cvCreateVideoWriter("demo.avi",-1, fps, cvSize(frameWidth, frameHeight), isColor);
}
cvNamedWindow( "preview", 1 );
CvFont font;
cvInitFont(&font,0, 1.1f, 1.1f,0,2,8); 
string result ="";
if( !capture)
{
cout<<"problems with avi file"<<endl;
}
else
{
IplImage *frame=0;
int k = 0, l=0;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame) break;
if( k%10 == 0)
{ 
result = showResult(this->predictions[l++]);
cout<<"Prediction for frame #"<<k<<" => "<<result<<endl;
}
cvPutText(frame, result.c_str(), cvPoint(50,50), &font, cvScalar(255,0,0));
cvShowImage("preview",frame);
if(save && writer) 
{
cvWriteFrame(writer, frame);
}
cvWaitKey(1000/fps);
k++; 
}
cvReleaseImage(&frame);
}
cvReleaseCapture(&capture);
cvDestroyWindow("preview");
if(writer) cvReleaseVideoWriter(&writer);
}
void Demo::getStatistics()
{
int n, h, sd, su, a, f, d;
n=h=sd=su=a=f=d=0;
for(int i=0; i< (int)this->predictions.size(); i++)
{
switch(this->predictions[i])
{
case 0:
n++; break;
case 1:
h++; break;
case 2:
sd++; break;
case 3:
su++; break;
case 4:
a++; break;
case 5:
f++; break;
case 6:
d++; break;
}
}
cout<<"---------------------------------"<<endl;
cout<<"Emotions recognized:"<<endl;
cout<<"Neutral: " << (double)n/(double)this->predictions.size() <<endl;
cout<<"Happy: " << (double)h/(double)this->predictions.size() <<endl;
cout<<"Sad: " << (double)sd/(double)this->predictions.size() <<endl;
cout<<"Suprised: " << (double)su/(double)this->predictions.size() <<endl;
cout<<"Angry: " << (double)a/(double)this->predictions.size() <<endl;
cout<<"Fear: " << (double)f/(double)this->predictions.size() <<endl;
cout<<"Disgusted: " << (double)d/(double)this->predictions.size() <<endl;
cout<<"---------------------------------"<<endl;
}
/********************************************************/
void show(IplImage *im)
{
cvNamedWindow("preview");
cvShowImage("preview", im);
cvWaitKey(0);
cvDestroyWindow("preview");
}
/********************************************************/
void sample(string filename, string type)
{
CvFont font;
cvInitFont(&font,0, 1.0f, 1.0f,0,2,8); 
IplImage *img = cvLoadImage(filename.c_str(), 1);
MultiTrain mt;
cout<<"Loading the classifier...."<<endl;
mt.loadModel("..\\datasets\\"+type+"\\emo_svm_model.xml");
int res = mt.getPrediction(img);
string result = showResult(res);
cvPutText(img, result.c_str(), cvPoint(20,20), &font, cvScalar(255,255,0));
cout<<result<<endl;
show(img);
cvReleaseImage(&img);
}
/************************************************************/
void processDetection(string inputdir, string outputdir, string pattern="*.jpg")
{
vector<string> filenames = vector<string>();
listFiles(inputdir, pattern, filenames);
try
{
IplImage *image=0;
for(int i=0; i<(int)filenames.size(); i++)
{
image = cvLoadImage((inputdir+"/"+filenames[i]).c_str(), 1);
FaceDetection detector(image);
if( ! detector.detectElements()) continue; // face and features are already detected here, no second call needed
detector.setRegions();
Face f = detector.getFace();
cvRectangle(image, cvPoint(f.upperface.x, f.upperface.y), cvPoint(f.upperface.x+f.upperface.width, f.upperface.y+f.upperface.height), cvScalar(0,255,255));
cvRectangle(image, cvPoint(f.lowerface.x, f.lowerface.y), cvPoint(f.lowerface.x+f.lowerface.width, f.lowerface.y+f.lowerface.height), cvScalar(0,0,255));
cvSaveImage((outputdir+"/"+filenames[i]).c_str(), image);
cout<<"image #"<<i<<(outputdir+"/"+filenames[i]).c_str()<<" is being saved..."<<endl;
}
cvReleaseImage(&image);
}
catch(cv::Exception &e)
{
cout<<e.what()<<endl;
}
}
/**********************************************************************/
void captureSnapshots(string inputdir, string outputdir, int rate, string pattern="*")
{
CvCapture *capture = 0;
cvNamedWindow( "preview", 1);
vector<string> filenames = vector<string>();
listFiles(inputdir, pattern, filenames); 
for(int i=0; i< (int) filenames.size(); i++)
{
capture = cvCaptureFromAVI((inputdir+"/"+filenames[i]).c_str());
int fps = ( int)cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
if( !capture)
{
cout<<"problems with avi file"<<endl;
continue;
}
else
{
IplImage *frame=0;
int k=1;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame)  break;
if(k%rate == 1)
{
//if(k>30)
{
string fname = filenames[i].substr(0, (filenames[i].length()-4));
fname = createSname(outputdir+"/", fname, "_frame", k, ".jpg");
cvSaveImage(fname.c_str(), frame); 
cout<<"file: "<<fname<<" is saving..."<<endl;
}
}
k++;
cvShowImage("preview",frame);
cvWaitKey(1000/fps);
}
cvReleaseImage(&frame);
}
cvReleaseCapture(&capture); // release this clip before opening the next one; the final release below is then a no-op
}

cvReleaseCapture(&capture);
cvDestroyWindow("preview");
}
main.cpp
#include "modules.hpp"
#include "lbp.hpp"
#include "tasks.hpp"
void playDemo(string type)
{
//string videofile = "../sample_videos/disgs_0004_3.mpg";
string videofile = "../sample_videos/happy_0014_1.mpg";
//string videofile = "../sample_videos/sadns_0005_1.mpg";
Demo demo(videofile, type);
demo.processVideo();
demo.displayPredictions(false);
demo.getStatistics();
}
int main()
{
try
{
playDemo("FEED");
}
catch( Exception &e)
{
cout<<e.what()<<endl;
}
system("pause");
return 0;
}
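
Once a model exists, the sample() task above gives a quick single-image check (it loads ..\datasets\<type>\emo_svm_model.xml internally), and testModel()/createConfusionMatrix() can be pointed at an emotrainset.xml built only from images that were held out of training. A tiny, hypothetical usage sketch; the image path is just a placeholder:

// hypothetical quick test of the trained model on one held-out frame
#include "tasks.hpp"

int main()
{
    // prints the predicted emotion and overlays it on the image
    sample("../test_images/surpr_0021_2_frame11.jpg", "FEED");
    return 0;
}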
click to hide/show revision 2
retagged

updated 2014-02-12 03:57:57 -0600

berak gravatar image

how i can add and load data file to train svm classifier?

i read a pdf file that is use FG-NET dataset to train svm. FG-NET Facial Expression and Emotion Database consists of MPEG video files with spontaneous emotions recorded. Database contains examples gathered from 18 subjects ( 9 female and 9 male). Proposed system was trained with captured video frames in which the displayed emotion is very representative. The training set consists of 675 images of seven states neutral and emotional (surprise, fear, disgust, sadness, happiness and anger). i have video files and image files,which of them use to train and which of use to test?

  modules.hpp
/**
*
* header file for 3 modules of FER system
* Face detection 
* Expression description
* Training & Recognition
*/
#ifndefmodules_hpp
#definemodules_hpp
#include "helpers.hpp"
#include "cv.h"
#include "highgui.h"
#include "ml.h"
using namespacecv;
classLandmark
{
public:
Landmark();
Landmark(CvRect box);
CvRect bbox;
boolisEmpty();
intgetX();
intgetY();
CvRect getRect();
};
classEye :publicLandmark
{
public:
Eye(){}
Eye(CvRect box){}
~Eye(){}
};
classEyebrow :publicLandmark
{
public:
CvPoint left;
CvPoint center;
CvPoint right;
Eyebrow(){}
Eyebrow(CvRect box){}
~Eyebrow(){}
};
classMouth :publicLandmark
{
public:
CvPoint left;
CvPoint center;
28
CvPoint upperCenter;
CvPoint lowerCenter;
CvPoint right;
Mouth(){}
Mouth(CvRect box){}
~Mouth(){}
};
classFace :publicLandmark
{
public:
CvRect bbox;
CvRect upperface;
CvRect lowerface;
Eye lefteye;
Eye righteye;
Mouth mouth;
Eyebrow lefteyebrow;
Eyebrow righteyebrow;
/**methods**/
Face(){}
Face(CvRect box){}
~Face(){}
voiddrawBox(IplImage* image, CvRect box);
voiddrawPoints(IplImage *image);
voiddrawElements(IplImage *image);
};
/********detector***********/
classFaceDetection
{
public:
Face face;
private:
CvMemStorage *buffer;
CvHaarClassifierCascade *faceCascade, *reyeCascade, *leyeCascade, *mouthCascade;
CvPoint currentROIlocation;
IplImage *image;
public:
FaceDetection(IplImage *image);
~FaceDetection();
Face getFace();
IplImage* getImage();
voidcalculatePoints();
booldetectElements();
voidsetRegions();
private:
booldetectFace();
voiddetectEyes();
CvRect setBrow(CvRect box);
voiddetectMouth();
voidsetEyebrows();
voidsetAbsoluteCoordinates(CvRect &r);
voidsetAbsoluteCoordinates(CvPoint &p);
voidsetCurrentROIlocation(intx, inty);
CvSeq* getMax(CvSeq * contours, doubleboxarea);
};
29
/*********extraction**********/
classFeatureExtraction
{
public:
FeatureExtraction();
FeatureExtraction(IplImage* upper, IplImage* lower);
~FeatureExtraction();
IplImage *upper;
IplImage *lower;
intindx;
floatfeature_vector[36*256];
public:
voidnormalize(IplImage* upper, IplImage* lower);
voidcalculateLBP();
voidsetLBPGrid(IplImage *img, intwidth, intheight);
};
/****Multiclass Training****/
classMultiTrain 
{
public:
MultiTrain();
~MultiTrain();
CvSVM SVM;
CvMat *trainData;
CvMat *labels;
CvTermCriteria criteria;
CvSVMParams params;
doubleACC;
voidcreateDataSet(string inputdir, string outputdir);
voidloadDataSet(string filename);
voidtrainModel(string outputdir);
voidloadModel(string filename);
intgetPrediction(IplImage *image);
voidtestModel(string filename);
voidcreateConfusionMatrix(string filename);
voidcalculateTrainDataCount(inttab[]);
voidprepareSets(intclass_counts[], intcounts[], intpart, intparts, CvMat *traindata, CvMat *trainlabels,
CvMat *testdata, CvMat *testlabels);
voidperformCrossValidation(intparts);
private:
voidprocessData(string path, inti);
};
/**cvSeq comparison function**/
static intcomp_func(const void* _a, const void* _b, void* userdata);
static intcomp_func_x(const void* _a, const void* _b, void* userdata);
#endif
30
modules.cpp
/**
* author: 
*/
#include "helpers.hpp"
#include "modules.hpp"
#include "lbp.hpp"
/****landmark****/
Landmark::Landmark()
{
this->bbox = cvRect(0,0,0,0);
}
Landmark::Landmark(CvRect box)
{
this->bbox = box;
}
boolLandmark::isEmpty()
{
if(this->bbox.height == 0 || this->bbox.width == 0)
return true;
return false;
}
intLandmark::getX()
{
return this->bbox.x;
}
intLandmark::getY()
{
return this->bbox.y;
}
CvRect Landmark::getRect()
{
return this->bbox;
}
/******Face******/
voidFace::drawBox(IplImage* image, CvRect box)
{
cvRectangle(image, cvPoint(box.x, box.y), cvPoint(box.x+box.width, box.y+box.height), CV_RGB(255,0,0),
1, 8, 0);
}
voidFace::drawElements(IplImage *image)
{
this->drawBox(image, this->bbox);
this->drawBox(image, this->lefteye.bbox);
this->drawBox(image, this->righteye.bbox);
this->drawBox(image, this->mouth.bbox);
}
/***********Face Detection*******************/
FaceDetection::FaceDetection(IplImage *image)
{
this->buffer = cvCreateMemStorage(0);
char*face = "../haarcascades/haarcascade_frontalface_default.xml";
char*eye_left= "../haarcascades/haarcascade_mcs_lefteye.xml";
char*eye_right = "../haarcascades/haarcascade_mcs_righteye.xml";
char*mouth = "../haarcascades/haarcascade_mcs_mouth.xml";
this->faceCascade = ( CvHaarClassifierCascade* )cvLoad( face, 0, 0, 0);
31
this->leyeCascade = ( CvHaarClassifierCascade* )cvLoad( eye_left, 0, 0, 0);
this->reyeCascade = ( CvHaarClassifierCascade* )cvLoad( eye_right, 0, 0, 0);
this->mouthCascade = ( CvHaarClassifierCascade* )cvLoad( mouth, 0, 0, 0);
this->currentROIlocation = cvPoint(0,0);
this->image = image;
this->face = Face();
}
FaceDetection::~FaceDetection()
{
cvReleaseHaarClassifierCascade( &faceCascade);
cvReleaseHaarClassifierCascade( &leyeCascade);
cvReleaseHaarClassifierCascade( &reyeCascade);
cvReleaseHaarClassifierCascade( &mouthCascade);
cvReleaseMemStorage( &buffer);
}
Face FaceDetection::getFace()
{
return this->face;
}
IplImage* FaceDetection::getImage()
{
return this->image;
}
boolFaceDetection::detectElements()
{
if(! this->detectFace()) return false;
this->detectEyes();
this->detectMouth();
this->setEyebrows();
this->setCurrentROIlocation(0,0);
return true;
}
boolFaceDetection::detectFace()
{
CvSeq *faces = cvHaarDetectObjects(this->image, faceCascade, buffer, 1.1, 3, 0, cvSize(30,30));
if(!faces->total) return false;
else
{
/**get the biggest detected face**/
cvSeqSort(faces, comp_func, 0);
CvRect *r = (CvRect*) cvGetSeqElem(faces, 0);
this->face.bbox = *r;
cvClearMemStorage(this->buffer);
}
return true;
}
voidFaceDetection::detectEyes()
{
/*left eye*/
cvSetImageROI(this->image, cvRect(this->face.bbox.x, this->face.bbox.y, this->face.bbox.width/2, (this->face.bbox.height*2/3)));
this->setCurrentROIlocation(this->face.bbox.x, this->face.bbox.y);
CvSeq *eyes = cvHaarDetectObjects(this->image, this->leyeCascade, this->buffer, 1.1, 3,0, cvSize(5,5));
cvSeqSort(eyes, comp_func, 0);
if( eyes->total != 0)
{
CvRect *left = (CvRect*) cvGetSeqElem( eyes, 0);
this->setAbsoluteCoordinates(*left);
this->face.lefteye.bbox = *left;
32
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
/*right eye*/
cvSetImageROI(this->image, cvRect(this->face.bbox.x+(this->face.bbox.width/2), this->face.bbox.y, this->face.bbox.width/2, (this->face.bbox.height*2/3)));
this->setCurrentROIlocation(this->face.bbox.x+(this->face.bbox.width/2), this->face.bbox.y);
CvSeq *reyes = cvHaarDetectObjects(this->image, this->reyeCascade, this->buffer, 1.1, 3,0, cvSize(5,5));
cvSeqSort(reyes, comp_func, 0);
if( reyes->total != 0)
{
CvRect *right = (CvRect*) cvGetSeqElem( reyes, 0);
this->setAbsoluteCoordinates(*right);
this->face.righteye.bbox = *right;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
}
voidFaceDetection::detectMouth()
{
cvSetImageROI(image, cvRect(this->face.bbox.x, this->face.bbox.y+(this->face.bbox.height/2), this->face.bbox.width, (this->face.bbox.height/2)));
setCurrentROIlocation(this->face.bbox.x, this->face.bbox.y+(this->face.bbox.height/2));
CvSeq *mouth = cvHaarDetectObjects(image, mouthCascade, buffer, 1.1, 3,0, cvSize(1,1));
if(mouth->total)
{
cvSeqSort(mouth, comp_func, 0);
CvRect *r = (CvRect*) cvGetSeqElem( mouth, 0);
this->setAbsoluteCoordinates(*r);
this->face.mouth.bbox = *r;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
}
voidFaceDetection::setEyebrows()
{
this->face.lefteyebrow.bbox = this->setBrow(this->face.lefteye.bbox);
this->face.righteyebrow.bbox = this->setBrow(this->face.righteye.bbox);
}
CvRect FaceDetection::setBrow(CvRect box)
{
intx = box.x - box.width/3;
inty = box.y - box.height*3/2;
intwidth = box.width*5/3;
intheight = box.height*2;
returncvRect(x, y, width, height);
}
voidFaceDetection::setRegions()
{
//upper
intx = this->face.lefteyebrow.bbox.x;
inty = this->face.lefteyebrow.bbox.y;
intwidth = this->face.lefteyebrow.bbox.width + this->face.righteyebrow.bbox.width;
intheight = this->face.lefteyebrow.bbox.height + this->face.lefteye.bbox.height;
33
this->face.upperface = cvRect(x, y, width, height);
//lower
x = this->face.lefteye.bbox.x;
width = (this->face.righteye.bbox.x+this->face.righteye.bbox.width) - this->face.lefteye.bbox.x;
y = this->face.mouth.bbox.y - this->face.mouth.bbox.height/2;
height = 2*this->face.mouth.bbox.height;
this->face.lowerface = cvRect(x,y,width,height);
}
voidFaceDetection::setCurrentROIlocation(intx, inty)
{
currentROIlocation.x = x;
currentROIlocation.y = y;
}
voidFaceDetection::setAbsoluteCoordinates(CvRect &r)
{
r.x += currentROIlocation.x;
r.y += currentROIlocation.y;
}
voidFaceDetection::setAbsoluteCoordinates(CvPoint &p)
{
p.x += currentROIlocation.x;
p.y += currentROIlocation.y;
}
/***********feature extraction ***************/
FeatureExtraction::FeatureExtraction()
{
this->upper = 0;
this->lower = 0;
this->indx = 0;
}
FeatureExtraction::FeatureExtraction(IplImage* upper, IplImage* lower)
{
this->normalize(upper, lower);
}
FeatureExtraction::~FeatureExtraction()
{
if(this->upper) cvReleaseImage(&this->upper);
if(this->lower) cvReleaseImage(&this->lower);
}
voidFeatureExtraction::normalize(IplImage* upper, IplImage* lower)
{
this->upper = cvCreateImage(cvSize(90,48), upper->depth, upper->nChannels);
this->lower = cvCreateImage(cvSize(72,48), lower->depth, lower->nChannels);
cvResize(upper, this->upper);
cvResize(lower, this->lower);
}
voidFeatureExtraction::calculateLBP()
{
this->indx = 0;
this->setLBPGrid(this->lower, 18, 12);
this->setLBPGrid(this->upper, 18, 12);
}
voidFeatureExtraction::setLBPGrid(IplImage *img, intwidth, intheight)
{
for(inti=0; i< (img->width/width); i++)
34
for(intj=0; j< (img->height/height); j++)
{
LBP lbp;
cvSetImageROI(img, cvRect(i*width,j*height, width, height));
lbp.createLBP(img);
lbp.histogram();
lbp.fillFeatureSet(this->feature_vector, this->indx);
this->indx+=256;
//cvRectangle(img, cvPoint(0,0), cvPoint(width, height), cvScalar(255,0,0));
cvResetImageROI(img);
}
}
/******MulticlassTraining****/
MultiTrain::MultiTrain()
{
this->trainData = 0;
this->labels = 0;
this->params = CvSVMParams();
this->params.term_crit.epsilon = 1.0000000116860974e-007;
this->params.term_crit.type = CV_TERMCRIT_EPS;
this->params.svm_type = CvSVM::C_SVC;
this->params.kernel_type = CvSVM::RBF;
this->params.gamma = 3.0000000000000001e-006;
this->params.C = 20;
}
MultiTrain::~MultiTrain()
{
if(this->trainData) cvReleaseMat(&this->trainData);
if(this->labels) cvReleaseMat(&this->labels);
}
voidMultiTrain::createDataSet(string inputdir, string outputdir)
{
/*
0 neutral
1 happiness
2 sadness
3 surprise
4 anger
5 fear
6 disgust
*/
vector<string> neutral = vector<string>();
vector<string> happy = vector<string>();
vector<string> sad = vector<string>();
vector<string> surprise = vector<string>();
vector<string> angry = vector<string>();
vector<string> fear = vector<string>();
vector<string> disgust = vector<string>();
listFiles(inputdir, "*neutr*", neutral);
listFiles(inputdir, "*happy*", happy);
listFiles(inputdir, "*sad*", sad);
listFiles(inputdir, "*surpr*", surprise);
listFiles(inputdir, "*ang*", angry);
listFiles(inputdir, "*fear*", fear);
listFiles(inputdir, "*disg*", disgust);
intcount = (int)(neutral.size()+happy.size()+sad.size()+surprise.size()+angry.size()+fear.size()+disgust.size());
35
cout<<count<<endl;
this->trainData = cvCreateMat(count, 36*256, CV_32FC1);
this->labels = cvCreateMat(count, 1, CV_32SC1);
cvZero(this->trainData);
cvZero(this->labels);
intj=0;
for(inti=0; i< (int)neutral.size(); i++)
{
cout<<"processing image # "<<i<<endl;
this->processData(inputdir+"/"+neutral[i], i);
CV_MAT_ELEM(*this->labels, int, i,0) = 0;
}
j += (int) neutral.size();
for(inti=0; i< (int)happy.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+happy[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 1;
}
j += (int) happy.size();
for(inti=0; i< (int)sad.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+sad[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 2;
}
j += (int) sad.size();
for(inti=0; i< (int)surprise.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+surprise[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 3;
}
j += (int) surprise.size();
for(inti=0; i< (int)angry.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+angry[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 4;
}
j += (int) angry.size();
for(inti=0; i< (int)fear.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+fear[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 5;
}
j += (int) fear.size();
for(inti=0; i< (int)disgust.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+disgust[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0)= 6;
}
CvFileStorage *file = cvOpenFileStorage((outputdir+"/emotrainset.xml").c_str(), 0, CV_STORAGE_WRITE);
cvWrite(file, "dataset", this->trainData);
cvWrite(file, "labels", this->labels);
36
cvReleaseFileStorage(&file);
}
voidMultiTrain::loadDataSet(string filename)
{
CvFileStorage* file = cvOpenFileStorage(filename.c_str(), 0, CV_STORAGE_READ);
this->trainData = (CvMat*)cvRead(file, cvGetFileNodeByName(file,0, "dataset"));
this->labels = (CvMat*) cvRead(file, cvGetFileNodeByName(file,0, "labels"));
cvReleaseFileStorage(&file);
}
voidMultiTrain::trainModel(string outputdir)
{
cout<<"Training the SVM classifier......"<<endl;
SVM.train(this->trainData, this->labels, 0,0,this->params);
SVM.save((outputdir+"/emo_svm_model.xml").c_str());
cout<<"SVM model saved to file: "<<"emo_svm_model.xml"<<endl;
}
voidMultiTrain::loadModel(string filename)
{
this->SVM.load(filename.c_str());
}
voidMultiTrain::calculateTrainDataCount(inttab[])
{
for(inti=0; i<this->labels->rows; i++)
{
tab[CV_MAT_ELEM(*this->labels, int, i,0)]++;
}
}
voidMultiTrain::prepareSets(intclass_counts[], intcounts[], intpart, intparts, CvMat *traindata, CvMat *trainlabels,
CvMat *testdata, CvMat *testlabels)
{
intclass_integral[7] = {0};
for(inti=1; i<7; i++)
{
class_integral[i] = calcSum(class_counts,0,i-1);
}
inttest_iter, train_iter;
test_iter = train_iter = 0;
inttype = -1;
for(inti=0; i<this->trainData->rows; i++)
{
if(i < (class_integral[0]+class_counts[0]))
type = 0;
else if(i < (class_integral[1]+class_counts[1]))
type = 1;
else if(i < (class_integral[2]+class_counts[2]))
type = 2;
else if(i < (class_integral[3]+class_counts[3]))
type = 3;
else if(i < (class_integral[4]+class_counts[4]))
type = 4;
else if(i < (class_integral[5]+class_counts[5]))
type = 5;
else if(i < (class_integral[6]+class_counts[6]))
type = 6;
if(type>=0)
{
37
CvMat *r = cvCreateMat(1, 36*256, CV_32FC1);
cvGetRow(this->trainData, r, i);
if((i >=(part*counts[type]+class_integral[type])) && (i<((part+1)*counts[type]
+class_integral[type])))
{
for(intj= 0; j<r->cols; j++)
{
CV_MAT_ELEM(*testdata, float, test_iter, j) = r->data.fl[j];
CV_MAT_ELEM(*testlabels, int, test_iter, 0) = type;
}
test_iter++;
}
else
{
for(intj= 0; j<r->cols; j++)
{
CV_MAT_ELEM(*traindata, float, train_iter, j) = r->data.fl[j];
CV_MAT_ELEM(*trainlabels, int, train_iter, 0) = type;
}
train_iter++;
}
}
}
}
voidMultiTrain::performCrossValidation(intparts)
{
vector<double> test_results;
intclass_counts[7] = {0};
intcounts [7] = {0};
if( parts !=0)
{
calculateTrainDataCount(class_counts);
for(inti=0; i<7; i++)
{
counts[i] = (class_counts[i]/parts);
}
}
if(parts == 0 || parts == 1) cout<<"Cross validation cannot be performed for such input values"<<endl;
else if(calcSum(counts, 7)<7) cout<<"The database is too small for performing the "<<parts<<"-fold cross
validation."<<endl;
else
{
//MAIN LOOP
for(intp=0; p<parts; p++)
{
//CREATE SETS
CvMat *traindata = 0;
CvMat *trainlabels = 0;
CvMat *testdata = 0;
CvMat *testlabels = 0;
testdata = cvCreateMat(calcSum(counts, 7), 36*256, CV_32FC1);
testlabels = cvCreateMat(calcSum(counts, 7),1, CV_32SC1);
traindata = cvCreateMat(this->trainData->rows - calcSum(counts, 7), 36*256, CV_32FC1);
trainlabels = cvCreateMat(this->trainData->rows - calcSum(counts,7), 1, CV_32SC1); 
38
//PREPARE SETS
this->prepareSets(class_counts, counts, p, parts, traindata, trainlabels, testdata, testlabels);
//PERFORM TRAINING
cout<<"Training the SVM classifier...part#"<<p<<endl;
SVM.train(traindata, trainlabels, 0,0, this->params);
//PERFORM TESTING
intTP = 0;
for(inti=0; i<(int)traindata->rows; i++)
{
CvMat *row = cvCreateMat(1, 36*256, CV_32FC1);
cvGetRow(traindata, row, i);
intres = (int)this->SVM.predict(row);
if(res == trainlabels->data.i[i]) 
TP++;
}
doubleaccuracy = (double)TP/(double)traindata->rows;
cout<<"accuracy for part#"<<p<<" : "<<accuracy<<endl;
//RELEASE SETS
cvReleaseMat(&traindata);
cvReleaseMat(&trainlabels);
cvReleaseMat(&testdata);
cvReleaseMat(&testlabels);
this->SVM.clear();
cout<<"-----------------------------------------"<<endl;
}
}
}
intMultiTrain::getPrediction(IplImage *image)
{
intres = 0;
FaceDetection fd(image);
if( !fd.detectElements()) return-3;
fd.setRegions();
Face f = fd.getFace();
cvSetImageROI(image, f.upperface);
IplImage *up = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, up, 0);
cvResetImageROI(image);
cvSetImageROI(image, f.lowerface);
IplImage *lo = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, lo, 0);
cvResetImageROI(image);
FeatureExtraction fe(up, lo);
fe.calculateLBP();
CvMat *mat = cvCreateMat(1, 36*256, CV_32FC1);
//cvInitMatHeader(mat, 1, 36*256, CV_32FC1, fe.feature_vector);
for(inti=0; i<mat->cols; i++)
mat->data.fl[i] = fe.feature_vector[i];
res = (int) this->SVM.predict(mat);
cvReleaseMat(&mat);
returnres;
}
voidMultiTrain::testModel(string filename)
39
{
this->loadDataSet(filename);
intTP = 0; //true prediction counter
for(inti=0; i<(int)this->trainData->rows; i++)
{
CvMat *row = cvCreateMat(1, 36*256, CV_32FC1);
cvGetRow(this->trainData, row, i);
intres = (int)this->SVM.predict(row);
if(res == this->labels->data.i[i]) 
TP++;
}
cout<<TP<<endl;
this->ACC = (double)TP/(double)this->trainData->rows;
cout<<"accuracy:"<<this->ACC<<endl;
}
voidMultiTrain::createConfusionMatrix(string filename)
{
intconfusionMatrix[7][7]= {0};
intoverall[7] = {0};
intTP = 0;
this->loadDataSet(filename);
for(inti=0; i<(int)this->trainData->rows; i++)
{
CvMat *row = cvCreateMat(1, 36*256, CV_32FC1);
cvGetRow(this->trainData, row, i);
intres = (int)this->SVM.predict(row);
confusionMatrix[this->labels->data.i[i]][res]++;
if(res == this->labels->data.i[i]) 
TP++;
}
cout<<"-------------------------------------"<<endl;
cout<<"----------confusion matrix-----------"<<endl;
for(inti=0; i<7; i++)
{
overall[i] = calcSum(confusionMatrix[i], 7);
for(intj=0; j<7; j++)
{
doubleperc = (double)confusionMatrix[i][j]*100/(double)overall[i];
cout<<perc<<"\t";
}
cout<<endl;
}
cout<<"-------------------------------------"<<endl;
this->ACC = (double)TP/(double)this->trainData->rows;
cout<<"accuracy:"<<this->ACC<<endl;
}
voidMultiTrain::processData(string path, inti)
{
//(inputdir+"/"+filenames[i]).c_str()
IplImage *image = cvLoadImage(path.c_str(), 1);
FaceDetection detector(image);
if( detector.detectElements())
{
detector.setRegions();
40
Face f = detector.getFace();
cvSetImageROI(image, f.upperface);
IplImage *up = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, up, 0);
cvResetImageROI(image);
cvSetImageROI(image, f.lowerface);
IplImage *lo = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, lo, 0);
cvResetImageROI(image);
FeatureExtraction fe(up, lo);
fe.calculateLBP();
for(intj=0; j<36*256; j++)
{
this->trainData->data.fl[i*36*256+j] = fe.feature_vector[j];
}
cvReleaseImage(&up);
cvReleaseImage(&lo);
}
cvReleaseImage(&image);
}
/**cvSeq comparison function**/
static intcomp_func(const void* _a, const void* _b, void* userdata)
{
CvRect * a = (CvRect*)_a;
CvRect * b = (CvRect*)_b;
intareaA = a->width * a->height;
intareaB = b->width * b->height;
if(areaA < areaB) return1;
else return-1;
}
static intcomp_func_x(const void* _a, const void* _b, void* userdata)
{
CvRect * a = (CvRect*)_a;
CvRect * b = (CvRect*)_b;
if(a->x > b->x) return1;
else return-1;
}
lbp.hpp
/**
* author:
* class for LBP encoding
*/
#include "modules.hpp"
#include "cxcore.h"
classLBP 
{ 
public: 
LBP(); 
public: 
~LBP(); 
41
IplImage* image; 
IplImage* LBPimage; 
CvHistogram* hist;
public: 
/* calculate LBP features */
voidcreateLBP(IplImage *patch); 
/* create histogram of LBP features */
voidhistogram();
/* copy histogram to feature set */
voidLBP::fillFeatureSet(float*set, intstart_indx);
}; 
lbp.cpp
/**
* author: 
*/
#include "lbp.hpp"
LBP::LBP()
{
image =0;
LBPimage =0;
hist =0;
}
LBP::~LBP()
{
if(image) cvReleaseImage(&image);
if(LBPimage) cvReleaseImage(&LBPimage);
}
voidLBP::createLBP(IplImage* patch)
{
IplImage* temp_image = cvCreateImage(cvGetSize(patch), patch->depth, patch->nChannels);
cvCopy(patch, temp_image);
image = cvCreateImage(cvSize(temp_image->width, temp_image->height), 8, 1); 
if(temp_image->nChannels == 3) 
{
cvCvtColor(temp_image, image, CV_BGR2GRAY); 
}
LBPimage = cvCreateImage(cvSize(image->width, image->height), 8, 1); 
intcenter=0; 
intcenter_lbp=0; 
for(introw=1; row<image->height-1; row++) 
{
for(intcol=1; col<image->width-1; col++) 
{
center = cvGetReal2D(image, row, col);
center_lbp = 0; 
if(center >= cvGetReal2D(image, row-1, col-1)) 
{
center_lbp += 1; 
}
if(center >= cvGetReal2D(image, row-1, col)) 
42
{
center_lbp += 2; 
}
if(center >= cvGetReal2D(image, row-1, col+1)) 
{
center_lbp += 4; 
}
if(center >= cvGetReal2D(image, row, col-1)) 
{
center_lbp += 8; 
}
if(center >= cvGetReal2D(image, row, col+1)) 
{
center_lbp += 16; 
}
if(center >= cvGetReal2D(image, row+1, col-1)) 
{
center_lbp += 32; 
}
if(center >= cvGetReal2D(image, row+1, col)) 
{
center_lbp += 64; 
}
if(center >= cvGetReal2D(image, row+1, col+1)) 
{
center_lbp += 128; 
}
cvSetReal2D(LBPimage, row, col, center_lbp); 
}
}
cvReleaseImage(&temp_image); 
}
voidLBP::histogram()
{
intbins = 256;
inthsize[] = {bins};
floatrange[] = {0,256};
float* ranges[] = {range};
floatmin_value =0, max_value = 0;
IplImage * planes[] = {this->LBPimage};
this->hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges, 1);
cvCalcHist(planes, this->hist, 0,0);
}
voidLBP::fillFeatureSet(float*set, intstart_indx)
{
for(inti=0; i<256; i++)
{
set[i+start_indx] = cvQueryHistValue_1D(hist, i);
}
}
43
helpers.hpp
/**
* author: 
* set of different functions
*/
#ifndefhelpers_hpp
#definehelpers_hpp
#include <vector>
#include <string>
#include <sstream>
#include <fstream>
#include <math.h>
#include <iostream>
using namespacestd;
/* function for listing files that match the pattern from directory*/
voidlistFiles(string directory, string pattern, vector<string> &files);
/* function for concatenating strings with integers */
string createSname(string path, string fname, string f, intindx, string ext);
/* function for concatenating strings - creating temporary file names */
string createFname(string path, string fname, string ext);
/* function for translating emotion codes */
string showResult(intcode);
/* function for suming the array values*/
intcalcSum(int*tab, intn);
/* function for suming the array values from indx1 to indx2*/
intcalcSum(int*tab, intidx1, intidx2);
#endif
helpers.cpp
/**
* author: 
*/
#include "helpers.hpp"
#include "modules.hpp"
voidlistFiles (string directory, string pattern, vector<string> &files)
{
string command = "dir "+directory+"\\"+pattern+" /B > temp.txt";
string d;
system(command.c_str());
ifstream in;
in.open("temp.txt", ifstream::in);
if( in.is_open())
{
while(true)
{
if(!(in>>d)) break;
files.push_back(d);
}
in.close();
}
44
}
string createSname(string path, string fname, string f, int indx, string ext)
{
// path + filename + frame tag + indx + ext
string t;
stringstream s;
s << indx;
s >> t;
string result = path+fname+f+t+ext;
return result;
}
string createFname(string path, string fname, string ext)
{
string result = path+fname+ext;
return result;
}
string showResult(int code)
{
string result = "";
switch(code)
{
case 0:
result = "neutral";
break;
case 1:
result = "happy";
break;
case 2:
result = "sad";
break;
case 3:
result = "surprised";
break;
case 4:
result = "angry";
break;
case 5:
result = "fear";
break;
case 6:
result = "disgusted";
break;
}
return result;
}
int calcSum(int *tab, int n)
{
int sum = 0;
for(int i = 0; i < n; i++)
sum += tab[i];
return sum;
}
int calcSum(int *tab, int idx1, int idx2)
{
int sum = 0;
for(int i = idx1; i <= idx2; i++)
sum += tab[i];
return sum;
}
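Note that listFiles() depends on the Windows dir command and returns bare file names, and MultiTrain::createDataSet() later selects the training images purely by file-name patterns (*neutr*, *happy*, *sad*, *surpr*, *ang*, *fear*, *disg*). A minimal usage sketch, with c:\feed\dataset\train as a hypothetical example folder (not a path from the original program):

// hypothetical example: the folder holds frames such as
// happy_0014_1_frame11.jpg, sadns_0005_1_frame21.jpg, disgs_0004_3_frame1.jpg, ...
vector<string> happy;
listFiles("c:\\feed\\dataset\\train", "*happy*", happy);   // shells out to "dir", Windows only
for(int i = 0; i < (int)happy.size(); i++)
    cout << happy[i] << endl;                              // bare names, without the directory part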
tasks.hpp
/**
* author 
* functions for performing particular tasks
*/
#ifndef tasks_hpp
#define tasks_hpp
#include "modules.hpp"
/** show image **/
void show(IplImage *im);
/** sample emotion recognition (the definition in tasks.cpp also takes the data set type) **/
void sample(string filename, string type);
/** perform face detection **/
void processDetection(string inputdir, string outputdir, string pattern);
/** capture snapshots (frames) from a video file **/
void captureSnapshots(string inputdir, string outputdir, int rate, string pattern);
/*****DEMO****/
class Demo
{
public:
Demo(string filename, string type);
~Demo();
vector<int> predictions;
string videofile;
string type;
void processVideo();
void displayPredictions(bool save);
void getStatistics();
};
/************/
#endif
tasks.cpp
/**
* author: 
*/
#include "tasks.hpp"
/*********************DEMO***********************************/
Demo::Demo(string filename, string type)
{
this->videofile = filename;
this->predictions = vector<int>();
this->type = type;
}
Demo::~Demo(){}
void Demo::processVideo()
{
CvCapture *capture = cvCaptureFromAVI(this->videofile.c_str());
cout<<"Loading classifier...."<<endl;
MultiTrain mt;
string path = "..\\datasets\\"+type+"\\";
mt.loadModel((path+"emo_svm_model.xml"));
if(!capture)
{
cout<<"problems with avi file"<<endl;
}
else
{
cout<<"Processing video...."<<endl;
IplImage *frame = 0;
int k = 0;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame) break;
if(k%10 == 0)
{
/* getPrediction() returns -3 when no face is found in the frame */
int res = (int)mt.getPrediction(frame);
cout<<"Prediction for frame #"<<k<<" => "<<res<<endl;
this->predictions.push_back(res);
}
k++;
}
/* frames returned by cvQueryFrame are owned by the capture and must not be released here */
}
cvReleaseCapture(&capture);
}
void Demo::displayPredictions(bool save)
{
CvCapture *capture = cvCaptureFromAVI(this->videofile.c_str());
CvVideoWriter *writer = 0;
int isColor = 1;
int fps = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
int frameWidth = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
int frameHeight = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
if(save)
{
/* fourcc -1 opens the codec selection dialog on Windows */
writer = cvCreateVideoWriter("demo.avi", -1, fps, cvSize(frameWidth, frameHeight), isColor);
}
cvNamedWindow("preview", 1);
CvFont font;
cvInitFont(&font, 0, 1.1f, 1.1f, 0, 2, 8);
string result = "";
if(!capture)
{
cout<<"problems with avi file"<<endl;
}
else
{
IplImage *frame = 0;
int k = 0, l = 0;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame) break;
if(k%10 == 0)
{
result = showResult(this->predictions[l++]);
cout<<"Prediction for frame #"<<k<<" => "<<result<<endl;
}
cvPutText(frame, result.c_str(), cvPoint(50,50), &font, cvScalar(255,0,0));
cvShowImage("preview", frame);
if(save && writer)
{
cvWriteFrame(writer, frame);
}
cvWaitKey(1000/fps);
k++;
}
}
cvReleaseCapture(&capture);
cvDestroyWindow("preview");
if(writer) cvReleaseVideoWriter(&writer);
}
void Demo::getStatistics()
{
int n, h, sd, su, a, f, d;
n = h = sd = su = a = f = d = 0;
for(int i = 0; i < (int)this->predictions.size(); i++)
{
switch(this->predictions[i])
{
case 0:
n++; break;
case 1:
h++; break;
case 2:
sd++; break;
case 3:
su++; break;
case 4:
a++; break;
case 5:
f++; break;
case 6:
d++; break;
}
}
cout<<"---------------------------------"<<endl;
cout<<"Emotions recognized:"<<endl;
cout<<"Neutral: " << (double)n/(double)this->predictions.size() <<endl;
cout<<"Happy: " << (double)h/(double)this->predictions.size() <<endl;
cout<<"Sad: " << (double)sd/(double)this->predictions.size() <<endl;
cout<<"Surprised: " << (double)su/(double)this->predictions.size() <<endl;
cout<<"Angry: " << (double)a/(double)this->predictions.size() <<endl;
cout<<"Fear: " << (double)f/(double)this->predictions.size() <<endl;
cout<<"Disgusted: " << (double)d/(double)this->predictions.size() <<endl;
cout<<"---------------------------------"<<endl;
}
/********************************************************/
void show(IplImage *im)
{
cvNamedWindow("preview");
cvShowImage("preview", im);
cvWaitKey(0);
cvDestroyWindow("preview");
}
/********************************************************/
void sample(string filename, string type)
{
CvFont font;
cvInitFont(&font, 0, 1.0f, 1.0f, 0, 2, 8);
IplImage *img = cvLoadImage(filename.c_str(), 1);
MultiTrain mt;
cout<<"Loading the classifier...."<<endl;
mt.loadModel("..\\datasets\\"+type+"\\emo_svm_model.xml");
int res = mt.getPrediction(img);
string result = showResult(res);
cvPutText(img, result.c_str(), cvPoint(20,20), &font, cvScalar(255,255,0));
cout<<result<<endl;
show(img);
cvReleaseImage(&img);
}
/************************************************************/
void processDetection(string inputdir, string outputdir, string pattern="*.jpg")
{
vector<string> filenames = vector<string>();
listFiles(inputdir, pattern, filenames);
try
{
IplImage *image = 0;
for(int i = 0; i < (int)filenames.size(); i++)
{
image = cvLoadImage((inputdir+"/"+filenames[i]).c_str(), 1);
FaceDetection detector(image);
if(!detector.detectElements()) continue;   /* detection already ran here; no need to call it twice */
detector.setRegions();
Face f = detector.getFace();
cvRectangle(image, cvPoint(f.upperface.x, f.upperface.y), cvPoint(f.upperface.x+
f.upperface.width, f.upperface.y+f.upperface.height), cvScalar(0,255,255));
cvRectangle(image, cvPoint(f.lowerface.x, f.lowerface.y),
cvPoint(f.lowerface.x+f.lowerface.width, f.lowerface.y+f.lowerface.height), cvScalar(0,0,255));
cvSaveImage((outputdir+"/"+filenames[i]).c_str(), image);
cout<<"image #"<<i<<" "<<(outputdir+"/"+filenames[i]).c_str()<<" is being saved..."<<endl;
}
cvReleaseImage(&image);
}
catch(cv::Exception &e)
{
cout<<e.what()<<endl;
}
}
/**********************************************************************/
void captureSnapshots(string inputdir, string outputdir, int rate, string pattern="*")
{
CvCapture *capture = 0;
cvNamedWindow("preview", 1);
vector<string> filenames = vector<string>();
listFiles(inputdir, pattern, filenames);
for(int i = 0; i < (int)filenames.size(); i++)
{
capture = cvCaptureFromAVI((inputdir+"/"+filenames[i]).c_str());
if(!capture)
{
cout<<"problems with avi file"<<endl;
continue;
}
int fps = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
IplImage *frame = 0;
int k = 1;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame) break;
if(k%rate == 1)
{
/* saved name: <video name>_frame<k>.jpg, e.g. happy_0014_1_frame11.jpg */
string fname = filenames[i].substr(0, (filenames[i].length()-4));
fname = createSname(outputdir+"/", fname, "_frame", k, ".jpg");
cvSaveImage(fname.c_str(), frame);
cout<<"file: "<<fname<<" is saving..."<<endl;
}
k++;
cvShowImage("preview", frame);
cvWaitKey(1000/fps);
}
cvReleaseCapture(&capture);   /* release each capture before opening the next video */
}
cvDestroyWindow("preview");
}
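Because the save condition is k%rate == 1, a call like the hypothetical one below keeps frames 1, 11, 21, ... of every video, and the saved frames keep the emotion keyword of the source video in their names, which is what the *happy*-style patterns in MultiTrain::createDataSet() match on later (the two folders are example paths, not part of the original program):

// sketch: extract every 10th frame of the FG-NET videos into a training folder
captureSnapshots("c:\\feed\\videos", "c:\\feed\\dataset\\train", 10, "*.mpg");
// happy_0014_1.mpg  ->  happy_0014_1_frame1.jpg, happy_0014_1_frame11.jpg, ...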
main.cpp
#include "modules.hpp"
#include "lbp.hpp"
#include "tasks.hpp"
void playDemo(string type)
{
//string videofile = "../sample_videos/disgs_0004_3.mpg";
string videofile = "../sample_videos/happy_0014_1.mpg";
//string videofile = "../sample_videos/sadns_0005_1.mpg";
Demo demo(videofile, type);
demo.processVideo();
demo.displayPredictions(false);
demo.getStatistics();
}
int main()
{
try
{
playDemo("FEED");
}
catch(Exception &e)
{
cout<<e.what()<<endl;
}
system("pause");
return 0;
}

I use the video file c:\happy_0014_1.mpg for testing. I want to know how to add the FG-NET dataset to this program. I have two FG-NET sets: one consists of video files and the other of images, and I think I should create a text file and copy the dataset images into a folder. How should inputdir, outputdir and path (the names used in this C++ program) be defined, and how do I define the path so the dataset is loaded for both the training and the test step (e.g. c:\feed\dataset...)?
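Judging from the listings, inputdir, outputdir and path are just directory strings handed to the existing functions, so one possible (untested) way to wire them together is sketched below. The directory names are examples only; the one real constraint visible in the code is that Demo::processVideo() and sample() load the model from ..\datasets\<type>\emo_svm_model.xml relative to the executable, so that is where trainModel() should write it.

// sketch only - example paths, not part of the original program
MultiTrain mt;

// inputdir  = folder with the training images (frames extracted from the FG-NET videos,
//             or the FG-NET image set; file names must contain the emotion keyword)
// outputdir = folder that receives emotrainset.xml
mt.createDataSet("c:\\feed\\dataset\\train", "..\\datasets\\FEED");

// createDataSet leaves trainData/labels in memory, so training can follow directly;
// in a later run, mt.loadDataSet("..\\datasets\\FEED\\emotrainset.xml") reloads them instead
mt.trainModel("..\\datasets\\FEED");            // writes emo_svm_model.xml into outputdir

// for testing, either cross-validate the training set ...
mt.performCrossValidation(5);

// ... or build a second set from held-out images into another folder
// (createDataSet always names its output emotrainset.xml) and evaluate on it
mt.loadModel("..\\datasets\\FEED\\emo_svm_model.xml");
mt.testModel("..\\datasets\\FEED_test\\emotrainset.xml");

// a video such as c:\happy_0014_1.mpg is then an end-to-end test, exactly as playDemo() does
Demo demo("c:\\happy_0014_1.mpg", "FEED");
demo.processVideo();
demo.getStatistics();

In that split, the still images (or frames captured from part of the videos) form the training set, while the remaining images and videos are kept aside for the test step.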

  modules.hpp
/**
*
* header file for 3 modules of FER system
* Face detection 
* Expression description
* Training & Recognition
*/
#ifndefmodules_hpp
#definemodules_hpp
#include "helpers.hpp"
#include "cv.h"
#include "highgui.h"
#include "ml.h"
using namespacecv;
classLandmark
{
public:
Landmark();
Landmark(CvRect box);
CvRect bbox;
boolisEmpty();
intgetX();
intgetY();
CvRect getRect();
};
classEye :publicLandmark
{
public:
Eye(){}
Eye(CvRect box){}
~Eye(){}
};
classEyebrow :publicLandmark
{
public:
CvPoint left;
CvPoint center;
CvPoint right;
Eyebrow(){}
Eyebrow(CvRect box){}
~Eyebrow(){}
};
classMouth :publicLandmark
{
public:
CvPoint left;
CvPoint center;
28
CvPoint upperCenter;
CvPoint lowerCenter;
CvPoint right;
Mouth(){}
Mouth(CvRect box){}
~Mouth(){}
};
classFace :publicLandmark
{
public:
CvRect bbox;
CvRect upperface;
CvRect lowerface;
Eye lefteye;
Eye righteye;
Mouth mouth;
Eyebrow lefteyebrow;
Eyebrow righteyebrow;
/**methods**/
Face(){}
Face(CvRect box){}
~Face(){}
voiddrawBox(IplImage* image, CvRect box);
voiddrawPoints(IplImage *image);
voiddrawElements(IplImage *image);
};
/********detector***********/
classFaceDetection
{
public:
Face face;
private:
CvMemStorage *buffer;
CvHaarClassifierCascade *faceCascade, *reyeCascade, *leyeCascade, *mouthCascade;
CvPoint currentROIlocation;
IplImage *image;
public:
FaceDetection(IplImage *image);
~FaceDetection();
Face getFace();
IplImage* getImage();
voidcalculatePoints();
booldetectElements();
voidsetRegions();
private:
booldetectFace();
voiddetectEyes();
CvRect setBrow(CvRect box);
voiddetectMouth();
voidsetEyebrows();
voidsetAbsoluteCoordinates(CvRect &r);
voidsetAbsoluteCoordinates(CvPoint &p);
voidsetCurrentROIlocation(intx, inty);
CvSeq* getMax(CvSeq * contours, doubleboxarea);
};
29
/*********extraction**********/
classFeatureExtraction
{
public:
FeatureExtraction();
FeatureExtraction(IplImage* upper, IplImage* lower);
~FeatureExtraction();
IplImage *upper;
IplImage *lower;
intindx;
floatfeature_vector[36*256];
public:
voidnormalize(IplImage* upper, IplImage* lower);
voidcalculateLBP();
voidsetLBPGrid(IplImage *img, intwidth, intheight);
};
/****Multiclass Training****/
classMultiTrain 
{
public:
MultiTrain();
~MultiTrain();
CvSVM SVM;
CvMat *trainData;
CvMat *labels;
CvTermCriteria criteria;
CvSVMParams params;
doubleACC;
voidcreateDataSet(string inputdir, string outputdir);
voidloadDataSet(string filename);
voidtrainModel(string outputdir);
voidloadModel(string filename);
intgetPrediction(IplImage *image);
voidtestModel(string filename);
voidcreateConfusionMatrix(string filename);
voidcalculateTrainDataCount(inttab[]);
voidprepareSets(intclass_counts[], intcounts[], intpart, intparts, CvMat *traindata, CvMat *trainlabels,
CvMat *testdata, CvMat *testlabels);
voidperformCrossValidation(intparts);
private:
voidprocessData(string path, inti);
};
/**cvSeq comparison function**/
static intcomp_func(const void* _a, const void* _b, void* userdata);
static intcomp_func_x(const void* _a, const void* _b, void* userdata);
#endif
30
modules.cpp
/**
* author: 
*/
#include "helpers.hpp"
#include "modules.hpp"
#include "lbp.hpp"
/****landmark****/
Landmark::Landmark()
{
this->bbox = cvRect(0,0,0,0);
}
Landmark::Landmark(CvRect box)
{
this->bbox = box;
}
boolLandmark::isEmpty()
{
if(this->bbox.height == 0 || this->bbox.width == 0)
return true;
return false;
}
intLandmark::getX()
{
return this->bbox.x;
}
intLandmark::getY()
{
return this->bbox.y;
}
CvRect Landmark::getRect()
{
return this->bbox;
}
/******Face******/
voidFace::drawBox(IplImage* image, CvRect box)
{
cvRectangle(image, cvPoint(box.x, box.y), cvPoint(box.x+box.width, box.y+box.height), CV_RGB(255,0,0),
1, 8, 0);
}
voidFace::drawElements(IplImage *image)
{
this->drawBox(image, this->bbox);
this->drawBox(image, this->lefteye.bbox);
this->drawBox(image, this->righteye.bbox);
this->drawBox(image, this->mouth.bbox);
}
/***********Face Detection*******************/
FaceDetection::FaceDetection(IplImage *image)
{
this->buffer = cvCreateMemStorage(0);
char*face = "../haarcascades/haarcascade_frontalface_default.xml";
char*eye_left= "../haarcascades/haarcascade_mcs_lefteye.xml";
char*eye_right = "../haarcascades/haarcascade_mcs_righteye.xml";
char*mouth = "../haarcascades/haarcascade_mcs_mouth.xml";
this->faceCascade = ( CvHaarClassifierCascade* )cvLoad( face, 0, 0, 0);
31
this->leyeCascade = ( CvHaarClassifierCascade* )cvLoad( eye_left, 0, 0, 0);
this->reyeCascade = ( CvHaarClassifierCascade* )cvLoad( eye_right, 0, 0, 0);
this->mouthCascade = ( CvHaarClassifierCascade* )cvLoad( mouth, 0, 0, 0);
this->currentROIlocation = cvPoint(0,0);
this->image = image;
this->face = Face();
}
FaceDetection::~FaceDetection()
{
cvReleaseHaarClassifierCascade( &faceCascade);
cvReleaseHaarClassifierCascade( &leyeCascade);
cvReleaseHaarClassifierCascade( &reyeCascade);
cvReleaseHaarClassifierCascade( &mouthCascade);
cvReleaseMemStorage( &buffer);
}
Face FaceDetection::getFace()
{
return this->face;
}
IplImage* FaceDetection::getImage()
{
return this->image;
}
boolFaceDetection::detectElements()
{
if(! this->detectFace()) return false;
this->detectEyes();
this->detectMouth();
this->setEyebrows();
this->setCurrentROIlocation(0,0);
return true;
}
boolFaceDetection::detectFace()
{
CvSeq *faces = cvHaarDetectObjects(this->image, faceCascade, buffer, 1.1, 3, 0, cvSize(30,30));
if(!faces->total) return false;
else
{
/**get the biggest detected face**/
cvSeqSort(faces, comp_func, 0);
CvRect *r = (CvRect*) cvGetSeqElem(faces, 0);
this->face.bbox = *r;
cvClearMemStorage(this->buffer);
}
return true;
}
voidFaceDetection::detectEyes()
{
/*left eye*/
cvSetImageROI(this->image, cvRect(this->face.bbox.x, this->face.bbox.y, this->face.bbox.width/2, (this->face.bbox.height*2/3)));
this->setCurrentROIlocation(this->face.bbox.x, this->face.bbox.y);
CvSeq *eyes = cvHaarDetectObjects(this->image, this->leyeCascade, this->buffer, 1.1, 3,0, cvSize(5,5));
cvSeqSort(eyes, comp_func, 0);
if( eyes->total != 0)
{
CvRect *left = (CvRect*) cvGetSeqElem( eyes, 0);
this->setAbsoluteCoordinates(*left);
this->face.lefteye.bbox = *left;
32
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
/*right eye*/
cvSetImageROI(this->image, cvRect(this->face.bbox.x+(this->face.bbox.width/2), this->face.bbox.y, this->face.bbox.width/2, (this->face.bbox.height*2/3)));
this->setCurrentROIlocation(this->face.bbox.x+(this->face.bbox.width/2), this->face.bbox.y);
CvSeq *reyes = cvHaarDetectObjects(this->image, this->reyeCascade, this->buffer, 1.1, 3,0, cvSize(5,5));
cvSeqSort(reyes, comp_func, 0);
if( reyes->total != 0)
{
CvRect *right = (CvRect*) cvGetSeqElem( reyes, 0);
this->setAbsoluteCoordinates(*right);
this->face.righteye.bbox = *right;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
}
voidFaceDetection::detectMouth()
{
cvSetImageROI(image, cvRect(this->face.bbox.x, this->face.bbox.y+(this->face.bbox.height/2), this->face.bbox.width, (this->face.bbox.height/2)));
setCurrentROIlocation(this->face.bbox.x, this->face.bbox.y+(this->face.bbox.height/2));
CvSeq *mouth = cvHaarDetectObjects(image, mouthCascade, buffer, 1.1, 3,0, cvSize(1,1));
if(mouth->total)
{
cvSeqSort(mouth, comp_func, 0);
CvRect *r = (CvRect*) cvGetSeqElem( mouth, 0);
this->setAbsoluteCoordinates(*r);
this->face.mouth.bbox = *r;
}
cvClearMemStorage(this->buffer);
cvResetImageROI(this->image);
}
voidFaceDetection::setEyebrows()
{
this->face.lefteyebrow.bbox = this->setBrow(this->face.lefteye.bbox);
this->face.righteyebrow.bbox = this->setBrow(this->face.righteye.bbox);
}
CvRect FaceDetection::setBrow(CvRect box)
{
intx = box.x - box.width/3;
inty = box.y - box.height*3/2;
intwidth = box.width*5/3;
intheight = box.height*2;
returncvRect(x, y, width, height);
}
voidFaceDetection::setRegions()
{
//upper
intx = this->face.lefteyebrow.bbox.x;
inty = this->face.lefteyebrow.bbox.y;
intwidth = this->face.lefteyebrow.bbox.width + this->face.righteyebrow.bbox.width;
intheight = this->face.lefteyebrow.bbox.height + this->face.lefteye.bbox.height;
33
this->face.upperface = cvRect(x, y, width, height);
//lower
x = this->face.lefteye.bbox.x;
width = (this->face.righteye.bbox.x+this->face.righteye.bbox.width) - this->face.lefteye.bbox.x;
y = this->face.mouth.bbox.y - this->face.mouth.bbox.height/2;
height = 2*this->face.mouth.bbox.height;
this->face.lowerface = cvRect(x,y,width,height);
}
voidFaceDetection::setCurrentROIlocation(intx, inty)
{
currentROIlocation.x = x;
currentROIlocation.y = y;
}
voidFaceDetection::setAbsoluteCoordinates(CvRect &r)
{
r.x += currentROIlocation.x;
r.y += currentROIlocation.y;
}
voidFaceDetection::setAbsoluteCoordinates(CvPoint &p)
{
p.x += currentROIlocation.x;
p.y += currentROIlocation.y;
}
/***********feature extraction ***************/
FeatureExtraction::FeatureExtraction()
{
this->upper = 0;
this->lower = 0;
this->indx = 0;
}
FeatureExtraction::FeatureExtraction(IplImage* upper, IplImage* lower)
{
this->normalize(upper, lower);
}
FeatureExtraction::~FeatureExtraction()
{
if(this->upper) cvReleaseImage(&this->upper);
if(this->lower) cvReleaseImage(&this->lower);
}
voidFeatureExtraction::normalize(IplImage* upper, IplImage* lower)
{
this->upper = cvCreateImage(cvSize(90,48), upper->depth, upper->nChannels);
this->lower = cvCreateImage(cvSize(72,48), lower->depth, lower->nChannels);
cvResize(upper, this->upper);
cvResize(lower, this->lower);
}
voidFeatureExtraction::calculateLBP()
{
this->indx = 0;
this->setLBPGrid(this->lower, 18, 12);
this->setLBPGrid(this->upper, 18, 12);
}
voidFeatureExtraction::setLBPGrid(IplImage *img, intwidth, intheight)
{
for(inti=0; i< (img->width/width); i++)
34
for(intj=0; j< (img->height/height); j++)
{
LBP lbp;
cvSetImageROI(img, cvRect(i*width,j*height, width, height));
lbp.createLBP(img);
lbp.histogram();
lbp.fillFeatureSet(this->feature_vector, this->indx);
this->indx+=256;
//cvRectangle(img, cvPoint(0,0), cvPoint(width, height), cvScalar(255,0,0));
cvResetImageROI(img);
}
}
/******MulticlassTraining****/
MultiTrain::MultiTrain()
{
this->trainData = 0;
this->labels = 0;
this->params = CvSVMParams();
this->params.term_crit.epsilon = 1.0000000116860974e-007;
this->params.term_crit.type = CV_TERMCRIT_EPS;
this->params.svm_type = CvSVM::C_SVC;
this->params.kernel_type = CvSVM::RBF;
this->params.gamma = 3.0000000000000001e-006;
this->params.C = 20;
}
MultiTrain::~MultiTrain()
{
if(this->trainData) cvReleaseMat(&this->trainData);
if(this->labels) cvReleaseMat(&this->labels);
}
voidMultiTrain::createDataSet(string inputdir, string outputdir)
{
/*
0 neutral
1 happiness
2 sadness
3 surprise
4 anger
5 fear
6 disgust
*/
vector<string> neutral = vector<string>();
vector<string> happy = vector<string>();
vector<string> sad = vector<string>();
vector<string> surprise = vector<string>();
vector<string> angry = vector<string>();
vector<string> fear = vector<string>();
vector<string> disgust = vector<string>();
listFiles(inputdir, "*neutr*", neutral);
listFiles(inputdir, "*happy*", happy);
listFiles(inputdir, "*sad*", sad);
listFiles(inputdir, "*surpr*", surprise);
listFiles(inputdir, "*ang*", angry);
listFiles(inputdir, "*fear*", fear);
listFiles(inputdir, "*disg*", disgust);
intcount = (int)(neutral.size()+happy.size()+sad.size()+surprise.size()+angry.size()+fear.size()+disgust.size());
35
cout<<count<<endl;
this->trainData = cvCreateMat(count, 36*256, CV_32FC1);
this->labels = cvCreateMat(count, 1, CV_32SC1);
cvZero(this->trainData);
cvZero(this->labels);
intj=0;
for(inti=0; i< (int)neutral.size(); i++)
{
cout<<"processing image # "<<i<<endl;
this->processData(inputdir+"/"+neutral[i], i);
CV_MAT_ELEM(*this->labels, int, i,0) = 0;
}
j += (int) neutral.size();
for(inti=0; i< (int)happy.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+happy[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 1;
}
j += (int) happy.size();
for(inti=0; i< (int)sad.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+sad[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 2;
}
j += (int) sad.size();
for(inti=0; i< (int)surprise.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+surprise[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 3;
}
j += (int) surprise.size();
for(inti=0; i< (int)angry.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+angry[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 4;
}
j += (int) angry.size();
for(inti=0; i< (int)fear.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+fear[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0) = 5;
}
j += (int) fear.size();
for(inti=0; i< (int)disgust.size(); i++)
{
cout<<"processing image # "<<i+j<<endl;
this->processData(inputdir+"/"+disgust[i], i+j);
CV_MAT_ELEM(*this->labels, int, i+j,0)= 6;
}
CvFileStorage *file = cvOpenFileStorage((outputdir+"/emotrainset.xml").c_str(), 0, CV_STORAGE_WRITE);
cvWrite(file, "dataset", this->trainData);
cvWrite(file, "labels", this->labels);
36
cvReleaseFileStorage(&file);
}
voidMultiTrain::loadDataSet(string filename)
{
CvFileStorage* file = cvOpenFileStorage(filename.c_str(), 0, CV_STORAGE_READ);
this->trainData = (CvMat*)cvRead(file, cvGetFileNodeByName(file,0, "dataset"));
this->labels = (CvMat*) cvRead(file, cvGetFileNodeByName(file,0, "labels"));
cvReleaseFileStorage(&file);
}
voidMultiTrain::trainModel(string outputdir)
{
cout<<"Training the SVM classifier......"<<endl;
SVM.train(this->trainData, this->labels, 0,0,this->params);
SVM.save((outputdir+"/emo_svm_model.xml").c_str());
cout<<"SVM model saved to file: "<<"emo_svm_model.xml"<<endl;
}
voidMultiTrain::loadModel(string filename)
{
this->SVM.load(filename.c_str());
}
voidMultiTrain::calculateTrainDataCount(inttab[])
{
for(inti=0; i<this->labels->rows; i++)
{
tab[CV_MAT_ELEM(*this->labels, int, i,0)]++;
}
}
voidMultiTrain::prepareSets(intclass_counts[], intcounts[], intpart, intparts, CvMat *traindata, CvMat *trainlabels,
CvMat *testdata, CvMat *testlabels)
{
intclass_integral[7] = {0};
for(inti=1; i<7; i++)
{
class_integral[i] = calcSum(class_counts,0,i-1);
}
inttest_iter, train_iter;
test_iter = train_iter = 0;
inttype = -1;
for(inti=0; i<this->trainData->rows; i++)
{
if(i < (class_integral[0]+class_counts[0]))
type = 0;
else if(i < (class_integral[1]+class_counts[1]))
type = 1;
else if(i < (class_integral[2]+class_counts[2]))
type = 2;
else if(i < (class_integral[3]+class_counts[3]))
type = 3;
else if(i < (class_integral[4]+class_counts[4]))
type = 4;
else if(i < (class_integral[5]+class_counts[5]))
type = 5;
else if(i < (class_integral[6]+class_counts[6]))
type = 6;
if(type>=0)
{
37
CvMat *r = cvCreateMat(1, 36*256, CV_32FC1);
cvGetRow(this->trainData, r, i);
if((i >=(part*counts[type]+class_integral[type])) && (i<((part+1)*counts[type]
+class_integral[type])))
{
for(intj= 0; j<r->cols; j++)
{
CV_MAT_ELEM(*testdata, float, test_iter, j) = r->data.fl[j];
CV_MAT_ELEM(*testlabels, int, test_iter, 0) = type;
}
test_iter++;
}
else
{
for(intj= 0; j<r->cols; j++)
{
CV_MAT_ELEM(*traindata, float, train_iter, j) = r->data.fl[j];
CV_MAT_ELEM(*trainlabels, int, train_iter, 0) = type;
}
train_iter++;
}
}
}
}
voidMultiTrain::performCrossValidation(intparts)
{
vector<double> test_results;
intclass_counts[7] = {0};
intcounts [7] = {0};
if( parts !=0)
{
calculateTrainDataCount(class_counts);
for(inti=0; i<7; i++)
{
counts[i] = (class_counts[i]/parts);
}
}
if(parts == 0 || parts == 1) cout<<"Cross validation cannot be performed for such input values"<<endl;
else if(calcSum(counts, 7)<7) cout<<"The database is too small for performing the "<<parts<<"-fold cross
validation."<<endl;
else
{
//MAIN LOOP
for(intp=0; p<parts; p++)
{
//CREATE SETS
CvMat *traindata = 0;
CvMat *trainlabels = 0;
CvMat *testdata = 0;
CvMat *testlabels = 0;
testdata = cvCreateMat(calcSum(counts, 7), 36*256, CV_32FC1);
testlabels = cvCreateMat(calcSum(counts, 7),1, CV_32SC1);
traindata = cvCreateMat(this->trainData->rows - calcSum(counts, 7), 36*256, CV_32FC1);
trainlabels = cvCreateMat(this->trainData->rows - calcSum(counts,7), 1, CV_32SC1); 
//PREPARE SETS
this->prepareSets(class_counts, counts, p, parts, traindata, trainlabels, testdata, testlabels);
//PERFORM TRAINING
cout<<"Training the SVM classifier...part#"<<p<<endl;
SVM.train(traindata, trainlabels, 0,0, this->params);
//PERFORM TESTING on the held-out fold
int TP = 0;
for(int i=0; i<(int)testdata->rows; i++)
{
CvMat rowheader;
CvMat *row = cvGetRow(testdata, &rowheader, i);
int res = (int)this->SVM.predict(row);
if(res == testlabels->data.i[i])
TP++;
}
double accuracy = (double)TP/(double)testdata->rows;
cout<<"accuracy for part#"<<p<<" : "<<accuracy<<endl;
//RELEASE SETS
cvReleaseMat(&traindata);
cvReleaseMat(&trainlabels);
cvReleaseMat(&testdata);
cvReleaseMat(&testlabels);
this->SVM.clear();
cout<<"-----------------------------------------"<<endl;
}
}
}
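(If I understand performCrossValidation() correctly, it only needs a previously saved dataset, so a minimal sketch - file name and fold count are just examples - would be:
MultiTrain mt;
mt.loadDataSet("c:\\feed\\dataset\\emotrainset.xml"); // XML written earlier by createDataSet()
mt.performCrossValidation(10); // 10-fold cross validation
)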
intMultiTrain::getPrediction(IplImage *image)
{
intres = 0;
FaceDetection fd(image);
if( !fd.detectElements()) return-3;
fd.setRegions();
Face f = fd.getFace();
cvSetImageROI(image, f.upperface);
IplImage *up = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, up, 0);
cvResetImageROI(image);
cvSetImageROI(image, f.lowerface);
IplImage *lo = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, lo, 0);
cvResetImageROI(image);
FeatureExtraction fe(up, lo);
fe.calculateLBP();
CvMat *mat = cvCreateMat(1, 36*256, CV_32FC1);
for(int i=0; i<mat->cols; i++)
mat->data.fl[i] = fe.feature_vector[i];
res = (int) this->SVM.predict(mat);
cvReleaseMat(&mat);
cvReleaseImage(&up); // the ROI copies are not released anywhere else
cvReleaseImage(&lo);
return res;
}
voidMultiTrain::testModel(string filename)
{
this->loadDataSet(filename);
intTP = 0; //true prediction counter
for(inti=0; i<(int)this->trainData->rows; i++)
{
CvMat rowheader;
CvMat *row = cvGetRow(this->trainData, &rowheader, i); // header only, avoids leaking a matrix per row
int res = (int)this->SVM.predict(row);
if(res == this->labels->data.i[i]) 
TP++;
}
cout<<TP<<endl;
this->ACC = (double)TP/(double)this->trainData->rows;
cout<<"accuracy:"<<this->ACC<<endl;
}
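(testModel() just reloads a dataset XML and scores the currently loaded SVM on it, so a separate test set could be built from held-out frames with createDataSet() and evaluated like this - all paths are only placeholders:
MultiTrain mt;
mt.createDataSet("c:\\feed\\dataset\\test", "c:\\feed\\dataset\\test"); // writes test\emotrainset.xml
mt.loadModel("c:\\feed\\dataset\\emo_svm_model.xml");
mt.testModel("c:\\feed\\dataset\\test\\emotrainset.xml");
)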
voidMultiTrain::createConfusionMatrix(string filename)
{
intconfusionMatrix[7][7]= {0};
intoverall[7] = {0};
intTP = 0;
this->loadDataSet(filename);
for(inti=0; i<(int)this->trainData->rows; i++)
{
CvMat rowheader;
CvMat *row = cvGetRow(this->trainData, &rowheader, i);
int res = (int)this->SVM.predict(row);
confusionMatrix[this->labels->data.i[i]][res]++;
if(res == this->labels->data.i[i]) 
TP++;
}
cout<<"-------------------------------------"<<endl;
cout<<"----------confusion matrix-----------"<<endl;
for(inti=0; i<7; i++)
{
overall[i] = calcSum(confusionMatrix[i], 7);
for(intj=0; j<7; j++)
{
doubleperc = (double)confusionMatrix[i][j]*100/(double)overall[i];
cout<<perc<<"\t";
}
cout<<endl;
}
cout<<"-------------------------------------"<<endl;
this->ACC = (double)TP/(double)this->trainData->rows;
cout<<"accuracy:"<<this->ACC<<endl;
}
voidMultiTrain::processData(string path, inti)
{
//(inputdir+"/"+filenames[i]).c_str()
IplImage *image = cvLoadImage(path.c_str(), 1);
FaceDetection detector(image);
if( detector.detectElements())
{
detector.setRegions();
Face f = detector.getFace();
cvSetImageROI(image, f.upperface);
IplImage *up = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, up, 0);
cvResetImageROI(image);
cvSetImageROI(image, f.lowerface);
IplImage *lo = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
cvCopy(image, lo, 0);
cvResetImageROI(image);
FeatureExtraction fe(up, lo);
fe.calculateLBP();
for(intj=0; j<36*256; j++)
{
this->trainData->data.fl[i*36*256+j] = fe.feature_vector[j];
}
cvReleaseImage(&up);
cvReleaseImage(&lo);
}
cvReleaseImage(&image);
}
/**cvSeq comparison function**/
static intcomp_func(const void* _a, const void* _b, void* userdata)
{
CvRect * a = (CvRect*)_a;
CvRect * b = (CvRect*)_b;
intareaA = a->width * a->height;
intareaB = b->width * b->height;
if(areaA < areaB) return1;
else return-1;
}
static intcomp_func_x(const void* _a, const void* _b, void* userdata)
{
CvRect * a = (CvRect*)_a;
CvRect * b = (CvRect*)_b;
if(a->x > b->x) return1;
else return-1;
}
lbp.hpp
/**
* author:
* class for LBP encoding
*/
#include "modules.hpp"
#include "cxcore.h"
classLBP 
{ 
public: 
LBP(); 
public: 
~LBP(); 
IplImage* image; 
IplImage* LBPimage; 
CvHistogram* hist;
public: 
/* calculate LBP features */
voidcreateLBP(IplImage *patch); 
/* create histogram of LBP features */
voidhistogram();
/* copy histogram to feature set */
void fillFeatureSet(float *set, int start_indx);
}; 
lbp.cpp
/**
* author: 
*/
#include "lbp.hpp"
LBP::LBP()
{
image =0;
LBPimage =0;
hist =0;
}
LBP::~LBP()
{
if(image) cvReleaseImage(&image);
if(LBPimage) cvReleaseImage(&LBPimage);
}
voidLBP::createLBP(IplImage* patch)
{
IplImage* temp_image = cvCreateImage(cvGetSize(patch), patch->depth, patch->nChannels);
cvCopy(patch, temp_image);
image = cvCreateImage(cvSize(temp_image->width, temp_image->height), 8, 1); 
if(temp_image->nChannels == 3) 
{
cvCvtColor(temp_image, image, CV_BGR2GRAY); 
}
else
{
cvCopy(temp_image, image); // patch is already single-channel
}
LBPimage = cvCreateImage(cvSize(image->width, image->height), 8, 1); 
intcenter=0; 
intcenter_lbp=0; 
for(introw=1; row<image->height-1; row++) 
{
for(intcol=1; col<image->width-1; col++) 
{
center = cvGetReal2D(image, row, col);
center_lbp = 0; 
if(center >= cvGetReal2D(image, row-1, col-1)) 
{
center_lbp += 1; 
}
if(center >= cvGetReal2D(image, row-1, col)) 
{
center_lbp += 2; 
}
if(center >= cvGetReal2D(image, row-1, col+1)) 
{
center_lbp += 4; 
}
if(center >= cvGetReal2D(image, row, col-1)) 
{
center_lbp += 8; 
}
if(center >= cvGetReal2D(image, row, col+1)) 
{
center_lbp += 16; 
}
if(center >= cvGetReal2D(image, row+1, col-1)) 
{
center_lbp += 32; 
}
if(center >= cvGetReal2D(image, row+1, col)) 
{
center_lbp += 64; 
}
if(center >= cvGetReal2D(image, row+1, col+1)) 
{
center_lbp += 128; 
}
cvSetReal2D(LBPimage, row, col, center_lbp); 
}
}
cvReleaseImage(&temp_image); 
}
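(To make the weighting above concrete: each of the eight neighbours adds a fixed power of two when the centre pixel is greater than or equal to it. For example, with a centre value of 100 and neighbours, in the order tested above, of 90, 120, 80, 130, 95, 140, 70 and 100, the comparisons that succeed add 1 + 4 + 16 + 64 + 128, so that pixel of LBPimage gets the LBP code 213.)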
voidLBP::histogram()
{
intbins = 256;
inthsize[] = {bins};
floatrange[] = {0,256};
float* ranges[] = {range};
floatmin_value =0, max_value = 0;
IplImage * planes[] = {this->LBPimage};
this->hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges, 1);
cvCalcHist(planes, this->hist, 0,0);
}
voidLBP::fillFeatureSet(float*set, intstart_indx)
{
for(inti=0; i<256; i++)
{
set[i+start_indx] = cvQueryHistValue_1D(hist, i);
}
}
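(The 256-bin histogram per block is also where the 36*256 size of feature_vector in MultiTrain comes from: the normalised lower-face patch, 72x48 split into 18x12 blocks, gives 4 x 4 = 16 blocks, and the upper-face patch, 90x48, gives 5 x 4 = 20 blocks, so 36 blocks x 256 bins = 9216 features per image.)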
helpers.hpp
/**
* author: 
* set of different functions
*/
#ifndefhelpers_hpp
#definehelpers_hpp
#include <vector>
#include <string>
#include <sstream>
#include <fstream>
#include <math.h>
#include <iostream>
using namespacestd;
/* function for listing files that match the pattern from directory*/
voidlistFiles(string directory, string pattern, vector<string> &files);
/* function for concatenating strings with integers */
string createSname(string path, string fname, string f, intindx, string ext);
/* function for concatenating strings - creating temporary file names */
string createFname(string path, string fname, string ext);
/* function for translating emotion codes */
string showResult(intcode);
/* function for summing the array values */
int calcSum(int *tab, int n);
/* function for summing the array values from idx1 to idx2 */
int calcSum(int *tab, int idx1, int idx2);
#endif
helpers.cpp
/**
* author: 
*/
#include "helpers.hpp"
#include "modules.hpp"
voidlistFiles (string directory, string pattern, vector<string> &files)
{
string command = "dir "+directory+"\\"+pattern+" /B > temp.txt";
string d;
system(command.c_str());
ifstream in;
in.open("temp.txt", ifstream::in);
if( in.is_open())
{
while(true)
{
if(!(in>>d)) break;
files.push_back(d);
}
in.close();
}
}
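(Since listFiles() shells out to the Windows dir command, the directory argument has to be something the shell can resolve - Windows-style backslashes are safest - and only bare file names come back, which is why the callers prepend inputdir+"/" again. A quick check that a path is being picked up, with a purely illustrative location:
vector<string> files;
listFiles("c:\\feed\\dataset\\train", "*happy*", files); // hypothetical folder of training frames
cout << files.size() << " files matched" << endl;
)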
string createSname(string path, string fname, string f, intindx, string ext)
{
//path +filename+ frame+ idnx + ext
string t;
stringstream s;
s<< indx;
s>> t;
string result = path+fname+f+t+ext;
returnresult;
}
string createFname(string path, string fname, string ext)
{
string result= path+fname+ext;
returnresult;
}
string showResult(intcode)
{
string result="";
switch(code)
{
case0:
result = "neutral";
break;
case1:
result = "happy";
break;
case2:
result = "sad";
break;
case3:
result = "surprised";
break;
case4:
result = "angry";
break;
case5:
result = "fear";
break;
case6:
result = "disgusted";
break;
}
returnresult;
}
intcalcSum(int*tab, intn)
{
intsum =0;
for(inti =0; i<n; i++)
sum+= tab[i];
returnsum;
}
intcalcSum(int*tab, intidx1, intidx2)
{
intsum=0;
for(inti=idx1; i<=idx2; i++)
sum+= tab[i];
returnsum;
}
tasks.hpp
/**
* author 
* functions for performing particular tasks
*/
#ifndeftasks_hpp
#definetasks_hpp
#include "modules.hpp"
/** show image **/
voidshow(IplImage *im);
/** sample emotion recognition **/
void sample(string filename, string type);
/** perform face detection **/
voidprocessDetection(string inputdir, string outputdir, string pattern);
/** capture snaphots (frames) from a video file **/
voidcaptureSnapshots(string inputdir, string outputdir, intrate, string pattern);
/*****DEMO****/
classDemo
{
public:
Demo(string filename, string type);
~Demo();
vector<int> predictions;
string videofile;
string type;
voidprocessVideo();
voiddisplayPredictions(boolsave);
voidgetStatistics();
};
/************/
#endif
tasks.cpp
/**
* author: 
*/
#include "tasks.hpp"
/*********************DEMO***********************************/
Demo::Demo(string filename, string type)
{
this->videofile = filename;
this->predictions = vector<int>();
this->type = type;
}
Demo::~Demo(){}
voidDemo::processVideo()
{
CvCapture * capture = cvCaptureFromAVI(this->videofile.c_str());
cout<<"Loading classifier...."<<endl;
MultiTrain mt;
string path = "..\\datasets\\"+type+"\\";
mt.loadModel((path+"emo_svm_model.xml"));
intfps = ( int)cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
if( !capture)
{
cout<<"problems with avi file"<<endl;
}
else
{
cout<<"Processing video...."<<endl;
IplImage *frame=0;
intk = 0;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame)  break;
if( k%10 == 0)
{
intres = (int)mt.getPrediction(frame);
cout<<"Prediction for frame #"<<k<<" => "<<res<<endl;
this->predictions.push_back(res);
}
k++;
}
// frames returned by cvQueryFrame() are owned by the capture -- do not release them here
}
cvReleaseCapture(&capture);
}
voidDemo::displayPredictions(boolsave)
{
CvCapture * capture = cvCaptureFromAVI(this->videofile.c_str());
CvVideoWriter *writer = 0;
intisColor = 1;
intfps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
intframeWidth = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
intframeHeight = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
if(save)
{
writer = cvCreateVideoWriter("demo.avi",-1, fps, cvSize(frameWidth, frameHeight), isColor);
}
cvNamedWindow( "preview", 1 );
CvFont font;
cvInitFont(&font,0, 1.1f, 1.1f,0,2,8); 
string result ="";
if( !capture)
{
cout<<"problems with avi file"<<endl;
}
else
{
IplImage *frame=0;
intk = 0, l=0;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame) break;
if( k%10 == 0)
{ 
result = showResult(this->predictions[l++]);
cout<<"Prediction for frame #"<<k<<" => "<<result<<endl;
}
cvPutText(frame, result.c_str(), cvPoint(50,50), &font, cvScalar(255,0,0));
cvShowImage("preview",frame);
if(save && writer) 
{
cvWriteFrame(writer, frame);
}
cvWaitKey(1000/fps);
k++; 
}
// frame is owned by the capture -- do not release it here
}
cvReleaseCapture(&capture);
cvDestroyWindow("preview");
if(writer) cvReleaseVideoWriter(&writer);
}
voidDemo::getStatistics()
{
intn, h, sd, su, a, f,d;
n=h=sd=su=a=f=d=0;
for(inti=0; i< (int)this->predictions.size(); i++)
{
switch(this->predictions[i])
{
case0:
n++; break;
case1:
h++; break;
case2:
sd++; break;
case3:
su++; break;
case4:
a++; break;
case5:
f++; break;
case6:
d++; break;
}
}
cout<<"---------------------------------"<<endl;
cout<<"Emotions recognized:"<<endl;
cout<<"Neutral: " << (double)n/(double)this->predictions.size() <<endl;
cout<<"Happy: " << (double)h/(double)this->predictions.size() <<endl;
cout<<"Sad: " << (double)sd/(double)this->predictions.size() <<endl;
cout<<"Suprised: " << (double)su/(double)this->predictions.size() <<endl;
cout<<"Angry: " << (double)a/(double)this->predictions.size() <<endl;
cout<<"Fear: " << (double)f/(double)this->predictions.size() <<endl;
cout<<"Disgusted: " << (double)d/(double)this->predictions.size() <<endl;
cout<<"---------------------------------"<<endl;
}
/********************************************************/
voidshow(IplImage *im)
{
cvNamedWindow("preview");
cvShowImage("preview", im);
cvWaitKey(0);
cvDestroyWindow("preview");
}
/********************************************************/
voidsample(string filename, string type)
{
CvFont font;
cvInitFont(&font,0, 1.0f, 1.0f,0,2,8); 
IplImage *img = cvLoadImage(filename.c_str(), 1);
MultiTrain mt;
cout<<"Loading the classifier...."<<endl;
mt.loadModel("..\\datasets\\"+type+"\\emo_svm_model.xml");
intres = mt.getPrediction(img);
string result = showResult(res);
cvPutText(img, result.c_str(), cvPoint(20,20), &font, cvScalar(255,255,0));
cout<<result<<endl;
show(img);
cvReleaseImage(&img);
}
/************************************************************/
voidprocessDetection(string inputdir, string outputdir, string pattern="*.jpg")
{
vector<string> filenames = vector<string>();
listFiles(inputdir, pattern, filenames);
try
{
IplImage *image=0;
for(inti=0; i<(int)filenames.size(); i++)
{
image = cvLoadImage((inputdir+"/"+filenames[i]).c_str(), 1);
FaceDetection detector(image);
if( ! detector.detectElements())
{
cvReleaseImage(&image); // skip frames where no face was found, without leaking them
continue;
}
detector.setRegions();
Face f = detector.getFace();
cvRectangle(image, cvPoint(f.upperface.x, f.upperface.y), cvPoint(f.upperface.x+
f.upperface.width, f.upperface.y+f.upperface.height), cvScalar(0,255,255));
cvRectangle(image, cvPoint(f.lowerface.x, f.lowerface.y),
cvPoint(f.lowerface.x+f.lowerface.width, f.lowerface.y+f.lowerface.height), cvScalar(0,0,255));
cvSaveImage((outputdir+"/"+filenames[i]).c_str(), image);
cout<<"image #"<<i<<" saved to "<<(outputdir+"/"+filenames[i])<<endl;
cvReleaseImage(&image); // release each image inside the loop, not just the last one
}
}
catch(cv::Exception &e)
{
cout<<e.what()<<endl;
}
}
/**********************************************************************/
voidcaptureSnapshots(string inputdir, string outputdir, intrate, string pattern="*")
{
CvCapture *capture = 0;
cvNamedWindow( "preview", 1);
vector<string> filenames = vector<string>();
listFiles(inputdir, pattern, filenames); 
for(inti=0; i< (int) filenames.size(); i++)
{
capture = cvCaptureFromAVI((inputdir+"/"+filenames[i]).c_str());
intfps = ( int)cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
if( !capture)
{
cout<<"problems with avi file"<<endl;
continue;
}
else
{
IplImage *frame=0;
intk=1;
while(1)
{
frame = cvQueryFrame(capture);
if(!frame)  break;
if(k%rate == 1)
{
//if(k>30)
{
string fname = filenames[i].substr(0, (filenames[i].length()-4));
fname = createSname(outputdir+"/", fname, "_frame", k, ".jpg");
cvSaveImage(fname.c_str(), frame); 
cout<<"file: "<<fname<<" is saving..."<<endl;
}
}
k++;
cvShowImage("preview",frame);
cvWaitKey(1000/fps);
}
// frame is owned by the capture -- do not release it here
}
}

cvReleaseCapture(&capture);
cvDestroyWindow("preview");
}
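(Since the FG-NET material is MPEG video, captureSnapshots() seems to be the step that turns the videos into the still frames the classifier is actually trained on - roughly every rate-th frame is saved as <videoname>_frame<k>.jpg in outputdir. A sketch of a call, with made-up directories:
captureSnapshots("c:\\feed\\videos", "c:\\feed\\dataset\\train", 10, "*.mpg"); // keep every 10th frame
)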
main.cpp
#include "modules.hpp"
#include "lbp.hpp"
#include "tasks.hpp"
voidplayDemo(string type)
{
//string videofile = "../sample_videos/disgs_0004_3.mpg";
string videofile = "../sample_videos/happy_0014_1.mpg";
//string videofile = "../sample_videos/sadns_0005_1.mpg";
Demo demo(videofile, type);
demo.processVideo();
demo.displayPredictions(false);
demo.getStatistics();
}
intmain()
{
try
{
playDemo("FEED");
}
catch( Exception &e)
{
cout<<e.what()<<endl;
}
system("pause");
return0;
}

how i can add and load data file to train svm classifier?

I read a PDF describing a system that uses the FG-NET dataset to train the SVM. The FG-NET Facial Expressions and Emotions Database consists of MPEG video files with spontaneous emotions, recorded from 18 subjects (9 female and 9 male). The proposed system was trained on captured video frames in which the displayed emotion is very representative; the training set consists of 675 images covering seven states, neutral plus six emotions (surprise, fear, disgust, sadness, happiness and anger). The words inputdir, outputdir and path are used in this C++ program: how can I define inputdir, outputdir and path, and how do I define the path for loading the dataset in both the training and the test steps (e.g. c:\feed\dataset...)? I uploaded the program at: http://rapidshare.com/share/15E45C30BB237A797F355EC82F09B8C1
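For context, this is roughly how I imagine the pieces would be chained together; every path below is just a placeholder for wherever the FEED videos and frames actually live, and the test folder layout is my own assumption:

#include "modules.hpp"
#include "tasks.hpp"
int main()
{
// placeholders -- point these at the real directories
string video_dir = "c:\\feed\\videos"; // FG-NET/FEED MPEG files
string train_dir = "c:\\feed\\dataset\\train"; // extracted, labelled frames
string model_dir = "c:\\feed\\dataset"; // emotrainset.xml and emo_svm_model.xml end up here
captureSnapshots(video_dir, train_dir, 10, "*.mpg"); // videos -> frames
MultiTrain mt;
mt.createDataSet(train_dir, model_dir); // frames -> emotrainset.xml
mt.trainModel(model_dir); // emotrainset.xml -> emo_svm_model.xml
// later runs could skip extraction: mt.loadDataSet(model_dir + "\\emotrainset.xml");
mt.loadModel(model_dir + "\\emo_svm_model.xml");
mt.testModel("c:\\feed\\dataset\\test\\emotrainset.xml"); // test XML built the same way from held-out frames
return 0;
}

Note that the demo code (processVideo(), sample()) loads the model from "..\\datasets\\"+type+"\\emo_svm_model.xml", so with playDemo("FEED") the trained model would have to sit in ..\datasets\FEED\ relative to the working directory.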
