you could try opencv's face recognition classes (in the opencv_contrib face module) for multiple persons, or the MACE filter there for a single person, but maybe you want to try out opencv's new dnn module instead:

the concept here is to run (color!) face images through a pretrained cnn and obtain a 1x128 "feature" vector, which can then be compared to another one using the L2 norm, to find out if it's the same person.

// you'll need the pretrained openface (facenet-style) dnn model from here (30.1mb):
//    https://raw.githubusercontent.com/pyannote/pyannote-data/master/openface.nn4.small2.v1.t7

static Mat processFace(Net net, Mat img) {
    // scale pixels to [0..1], resize to the 96x96 input the network expects,
    // no mean subtraction, swap BGR to RGB (swapRB=true), no cropping
    Mat blob = Dnn.blobFromImage(img, 1.0/255, new Size(96,96), Scalar.all(0), true, false);
    net.setInput(blob);
    // the forward pass yields the 1x128 feature vector; clone it, because the
    // net's output buffer gets reused on the next forward() call
    return net.forward().clone();
}

public static void main(String[] args) {
    // Load the native library.
    System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

    Mat img1 = Imgcodecs.imread("../img/face1.png");
    Mat img2 = Imgcodecs.imread("../img/face2.jpg");
    // load the pretrained torch model (see the url in the comment above)
    Net net  = Dnn.readNetFromTorch("openface.nn4.small2.v1.t7");

    Mat feature1 = processFace(net, img1);
    Mat feature2 = processFace(net, img2);
    // Core.norm() with two inputs defaults to NORM_L2, i.e. the L2 distance between the features
    double distance = Core.norm(feature1, feature2);
    System.out.println("distance: " + distance);
}
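
if you need an actual same / different decision on top of the raw distance, you can compare it against a threshold. here's a minimal sketch, assuming the plain (not squared) L2 distance from Core.norm() above; the 0.6 cut-off is only an illustrative assumption, not a documented value, so tune it on pairs of your own images:

static boolean isSamePerson(Mat feature1, Mat feature2) {
    // L2 distance between the two 1x128 feature vectors
    double distance = Core.norm(feature1, feature2);
    // assumed cut-off, for illustration only -- tune it on known same / different pairs
    double threshold = 0.6;
    return distance < threshold;
}

whether such a cut-off is too strict or too loose depends a lot on how well the faces are detected, cropped and aligned before they go into processFace(), so look at the distances you get on a few known pairs first.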