
Opencv C++ Holistically-Nested Edge Detection

asked 2019-10-14 16:25:57 -0600 by Espor

Hello,

I am trying to run the pretrained Holistically-Nested Edge Detection Model from https://github.com/s9xie/hed in C++. I have found this Python based example: https://www.pyimagesearch.com/2019/03... and the Opencv Sample in Python: https://github.com/opencv/opencv/blob.... I also looked up the general aproach to custom layers in Opencv here: https://docs.opencv.org/master/dc/db1.... But after several hours of trying I am still not able to get anything to run. Can anyone recommend a good example or tutorial on this matter? Thanks in advance.
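
For reference, the general pattern from the custom-layers tutorial looks roughly like this in C++ (a minimal sketch with an identity layer and placeholder names MyLayer / MyType and placeholder model file names; HED itself needs a cropping layer, which the answer below implements in full):

    // Minimal sketch of registering a custom dnn layer (identity layer, placeholder names).
    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS

    class MyLayer : public cv::dnn::Layer
    {
    public:
        MyLayer(const cv::dnn::LayerParams& params) : Layer(params) {}

        static cv::Ptr<cv::dnn::Layer> create(cv::dnn::LayerParams& params)
        {
            return cv::Ptr<cv::dnn::Layer>(new MyLayer(params));
        }

        // Tell the dnn engine what output shape(s) this layer produces.
        virtual bool getMemoryShapes(const std::vector<std::vector<int> >& inputs,
                                     const int requiredOutputs,
                                     std::vector<std::vector<int> >& outputs,
                                     std::vector<std::vector<int> >& internals) const CV_OVERRIDE
        {
            CV_UNUSED(requiredOutputs); CV_UNUSED(internals);
            outputs.assign(1, inputs[0]);   // identity: same shape as the input
            return false;
        }

        // Compute the output blob(s).
        virtual void forward(cv::InputArrayOfArrays inputs_arr,
                             cv::OutputArrayOfArrays outputs_arr,
                             cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
        {
            CV_UNUSED(internals_arr);
            std::vector<cv::Mat> inputs, outputs;
            inputs_arr.getMatVector(inputs);
            outputs_arr.getMatVector(outputs);
            inputs[0].copyTo(outputs[0]);   // identity: pass the data through
        }
    };

    int main()
    {
        // Map the layer type name that appears in the .prototxt to the class above,
        // then load the model as usual (file names are placeholders).
        CV_DNN_REGISTER_LAYER_CLASS(MyType, MyLayer);
        cv::dnn::Net net = cv::dnn::readNet("model.prototxt", "model.caffemodel");
        return 0;
    }

Note the registration call comes before readNet(), mirroring cv.dnn_registerLayer in the Python samples.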


Comments

berak ( 2019-10-15 00:25:28 -0600 )

https://github.com/opencv/opencv/blob... works well with the model files downloaded here

sturkmen ( 2019-10-15 08:45:46 -0600 )

1 answer


answered 2020-01-16 09:00:29 -0600

Here is an implementation based on @berak's suggestion:

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/layer.details.hpp>
#include <opencv2/dnn/shape_utils.hpp>

using namespace cv;
using namespace std;
using namespace cv::dnn;


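// HED's Caffe prototxt uses "Crop" layers: this custom layer center-crops the first
// input blob to the spatial size of the second one (same behaviour as the CropLayer
// in the OpenCV Python sample referenced in the question).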
class myCropLayer : public Layer
{
public:
    myCropLayer(const LayerParams &params) : Layer(params)
    {
    }

    static cv::Ptr<Layer> create(LayerParams& params)
    {
        return cv::Ptr<Layer>(new myCropLayer(params));
    }

    virtual bool getMemoryShapes(const std::vector<std::vector<int> > &inputs,
                                 const int requiredOutputs,
                                 std::vector<std::vector<int> > &outputs,
                                 std::vector<std::vector<int> > &internals) const CV_OVERRIDE
    {
        CV_UNUSED(requiredOutputs); CV_UNUSED(internals);
        std::vector<int> outShape(4);
        outShape[0] = inputs[0][0];  // batch size
        outShape[1] = inputs[0][1];  // number of channels
        outShape[2] = inputs[1][2];
        outShape[3] = inputs[1][3];
        outputs.assign(1, outShape);
        return false;
    }

    // Legacy forward() overload (pre-OpenCV 4 style); same center crop as the overload below.
    virtual void forward(std::vector<Mat*> &input, std::vector<Mat> &output, std::vector<Mat> &internals) CV_OVERRIDE
    {
        CV_UNUSED(internals);
        cv::Mat* inp = input[0];
        cv::Mat  out = output[0];
        int ystart = (inp->size[2] - out.size[2]) / 2;
        int xstart = (inp->size[3] - out.size[3]) / 2;
        int yend = ystart + out.size[2];
        int xend = xstart + out.size[3];

        const int batchSize = inp->size[0];
        const int numChannels = inp->size[1];
        const int height = out.size[2];
        const int width = out.size[3];

        int sz[] = { (int)batchSize, numChannels, height, width };
        out.create(4, sz, CV_32F);
        for(int i=0; i<batchSize; i++)
        {
            for(int j=0; j<numChannels; j++)
            {
                cv::Mat plane(inp->size[2], inp->size[3], CV_32F, inp->ptr<float>(i,j));
                cv::Mat crop = plane(cv::Range(ystart,yend), cv::Range(xstart,xend));
                cv::Mat targ(height, width, CV_32F, out.ptr<float>(i,j));
                crop.copyTo(targ);
            }
        }
    }


    // OpenCV 4 style forward(): center-crop inputs[0] to the spatial size of the output.
    virtual void forward(cv::InputArrayOfArrays inputs_arr,
                         cv::OutputArrayOfArrays outputs_arr,
                         cv::OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_UNUSED(internals_arr);
        cerr << "myCropLayer::forward ENTERING" << endl;

        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        cv::Mat& inp = inputs[0];
        cv::Mat& out = outputs[0];

        int ystart = (inp.size[2] - out.size[2]) / 2;
        int xstart = (inp.size[3] - out.size[3]) / 2;
        int yend = ystart + out.size[2];
        int xend = xstart + out.size[3];

        const int batchSize = inp.size[0];
        const int numChannels = inp.size[1];
        const int height = out.size[2];
        const int width = out.size[3];

        int sz[] = { (int)batchSize, numChannels, height, width };
        out.create(4, sz, CV_32F);
        for(int i=0; i<batchSize; i++)
        {
            for(int j=0; j<numChannels; j++)
            {
                cv::Mat plane(inp.size[2], inp.size[3], CV_32F, inp.ptr<float>(i,j));
                cv::Mat crop = plane(cv::Range(ystart,yend), cv::Range(xstart,xend));
                cv::Mat targ(height, width, CV_32F, out.ptr<float>(i,j));
                crop.copyTo(targ);
            }
        }
    }
};



int main( int argc, char* argv[] )
{
    // Register the custom Crop layer before loading the HED Caffe model.
    CV_DNN_REGISTER_LAYER_CLASS(Crop, myCropLayer);
    Net net = readNet("/home/jfeldmar/Code/HED_EdgeDetection/Edge_detection/deploy.prototxt", "/home/jfeldmar/Code/HED_EdgeDetection/Edge_detection/hed_pretrained_bsds.caffemodel");

    // Read the test image, resize it, and build the input blob with the HED/Caffe mean subtracted.
    cv::Mat img = cv::imread("/home/jfeldmar/Code/HED_EdgeDetection/Edge_detection/colin_mcalpin.jpg");
    cv::Size reso(128, 128);
    Mat theInput;
    resize(img, theInput, reso);
    cv::Mat blob = blobFromImage(theInput, 1.0, reso, cv::Scalar(104.00698793, 116.66876762, 122.67891434), false, false);


    net.setInput(blob);
    cv::Mat out = net.forward(); // output blob of the network's last layer
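The answer is cut off above. A minimal sketch of the usual post-processing (assuming the standard 1 x 1 x H x W HED output blob and the variables defined in the code above; not part of the original answer) would continue main() like this:

        // Wrap the single output plane without copying, scale [0,1] -> [0,255],
        // and resize back to the original image size.
        cv::Mat edges(out.size[2], out.size[3], CV_32F, out.ptr<float>(0, 0));
        cv::Mat edges8u;
        edges.convertTo(edges8u, CV_8U, 255.0);
        cv::resize(edges8u, edges8u, img.size());

        cv::imshow("HED edges", edges8u);
        cv::waitKey(0);
        return 0;
    }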
