DNN/Tensorflow API works in Python but not C++

Hi, I'm fairly new to training my own NN, and I have only gotten it to work partially: for some reason, I can only detect objects in Python, not in C++. In Python (3.6) this detects objects as intended:

import cv2 as cv

cvNet = cv.dnn.readNetFromTensorflow('frozen_inference_graph.pb', 'ssd_graph.pbtxt')

img = cv.imread('image2.jpg')
rows = img.shape[0]
cols = img.shape[1]
# 300x300 input, swap BGR->RGB, no scaling or mean subtraction
cvNet.setInput(cv.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))
cvOut = cvNet.forward()

# each detection: [batchId, classId, score, left, top, right, bottom] (normalized coords)
for detection in cvOut[0,0,:,:]:
    score = float(detection[2])
    if score > 0.3:
        left = detection[3] * cols
        top = detection[4] * rows
        right = detection[5] * cols
        bottom = detection[6] * rows
        cv.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (23, 230, 210), thickness=2)

cv.imshow('img', img)
cv.waitKey()

However, a very similar program in C++ runs without errors but does not return any results:

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

#include <fstream>
#include <sstream>
#include <iostream>

using namespace cv;
using namespace dnn;
using namespace std;

int main()
{
    String modelConfiguration = "ssd_graph.pbtxt";
    String modelWeights = "frozen_inference_graph.pb";
    Mat blob;

    Net net = readNetFromTensorflow(modelWeights, modelConfiguration);

    Mat img = imread("image2.jpg");
    int rows = img.rows;
    int cols = img.cols;

    //blobFromImage(frame, blob, 1 / 255.0, Size(inpWidth, inpHeight), Scalar(0, 0, 0), true, false);
    blobFromImage(img, blob, 1 / 127.5, Size(299, 299), Scalar(127.5, 127.5, 127.5), true, false);

    //Sets the input to the network
    net.setInput(blob);

    // Runs the forward pass to get output of the output layers
    vector<Mat> outs;
    net.forward(outs); //, getOutputsNames(net)

    for (int i=0; i < outs.size(); i++) {
        Mat detection;
        detection = outs[i];
        float* data = (float*)outs[i].data;
        float score = float(detection.data[2]);
        if (score >= 0.0) {
            int left = detection.data[3] * cols;
            int top = detection.data[4] * rows;
            int right = detection.data[5] * cols;
            int bottom = detection.data[6] * rows;
            rectangle(img, Point(int(left), int(top)), Point(int(right), int(bottom)), (23, 230, 210), 2);
            cout << detection.data[1] << endl; // detection[1] is the class label
        }
    }

    imshow("img", img);
    waitKey();

    return 0;
}
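
For reference, this is how the Python parsing loop above would translate to C++ (a minimal sketch, assuming outs[0] is the usual [1, 1, N, 7] DetectionOutput blob of floats; note that Mat::data is a uchar* pointer, so indexing it directly reads bytes rather than float values):

// Reinterpret the 4-D output blob as an N x 7 float matrix, one row per
// detection: [batchId, classId, score, left, top, right, bottom]
Mat detectionMat(outs[0].size[2], outs[0].size[3], CV_32F, outs[0].ptr<float>());
for (int r = 0; r < detectionMat.rows; r++) {
    float score = detectionMat.at<float>(r, 2);
    if (score > 0.3f) {
        // box coordinates are normalized to [0, 1]
        int left   = int(detectionMat.at<float>(r, 3) * cols);
        int top    = int(detectionMat.at<float>(r, 4) * rows);
        int right  = int(detectionMat.at<float>(r, 5) * cols);
        int bottom = int(detectionMat.at<float>(r, 6) * rows);
        rectangle(img, Point(left, top), Point(right, bottom), Scalar(23, 230, 210), 2);
    }
}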

I'm using Python 3.6 and OpenCV 4.0.0, and I trained the model with TensorFlow 1.12 starting from the SSD_Inception_V2_coco pre-trained model. Can anyone point me in the right direction? Thanks!

Update: I tried following the advice here, but optimize_for_inference.py gives an error [KeyError: "The following input nodes were not found: {'Mul'}\n"].
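
For reference, my understanding is that for models from the TensorFlow Object Detection API the .pbtxt is usually generated with OpenCV's tf_text_graph_ssd.py rather than optimize_for_inference.py, along these lines (the file names below are just from my setup, and pipeline.config is the config used for training):

python tf_text_graph_ssd.py --input frozen_inference_graph.pb --config pipeline.config --output ssd_graph.pbtxt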