OpenCV dnn shows an error when using resnet50.cfg from darknet

I trained my model with darknet on my own images, following this tutorial: https://pjreddie.com/darknet/train-cifar/

Testing the model in darknet works fine, but when I run it through OpenCV dnn I get the following error:

Error: Assertion failed (inputs[0] == inputs[i]) in getMemoryShapes, file C:\Users\opencv\sources\modules\dnn\src\layers\eltwise_layer.cpp, line 115

The error is raised at this specific line: Mat prob = clasificacion.forward();

Why does this error occur?

I tested other models with yolo_tyny.cfg and yolov3_ssd.cfg and they all work well.

My OpenCV version is 3.4.2, on Windows 10, using Qt and C++.

My OpenCV code:

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

using namespace cv;
using namespace cv::dnn;

String modelConfiguration_class = "resnet50.cfg";
String modelWeights_class = "resnet50_last.weights";

// Load the darknet classifier and run it on the CPU with the default OpenCV backend
Net clasificacion = readNetFromDarknet(modelConfiguration_class, modelWeights_class);
clasificacion.setPreferableBackend(DNN_BACKEND_OPENCV);
clasificacion.setPreferableTarget(DNN_TARGET_CPU);

Mat frame = cv::imread("file_path");

// Scale pixels to [0,1], resize to the 256x256 network input, swap R and B, no crop
Mat blob = blobFromImage(frame, 1.0/255.0, Size(256, 256), Scalar(0, 0, 0), true, false);
clasificacion.setInput(blob);
Mat prob = clasificacion.forward();
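
Once forward() works, this is how I plan to read the result (just a sketch of my own, assuming prob comes back as a single row of softmax scores, one per class, matching filters=4 in the last convolutional layer; it also needs <iostream>):

Mat scores = prob.reshape(1, 1);            // flatten to one row of class scores
Point classIdPoint;
double confidence;
minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
int classId = classIdPoint.x;               // index of the highest-scoring class
std::cout << "class " << classId << ", score " << confidence << std::endl;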

My resnet50.cfg:

[net]
# Training
#batch=128
#subdivisions=64

# Testing
batch=1
subdivisions=1

height=256
width=256
max_crop=448
channels=3
momentum=0.9
decay=0.0005
flip=0
rotate=0

burn_in=1000
learning_rate=0.1
policy=poly
power=4
max_batches=30000

angle=0
hue=.1
saturation=.75
exposure=.75
aspect=.75

[convolutional]
batch_normalize=1
filters=64
size=7
stride=2
pad=1
activation=leaky

[maxpool]
size=2
stride=2

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky


# Conv 4
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

#Conv 5
[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=2048
size=1
stride=1
pad=1
activation=linear

[shortcut]
from=-4
activation=leaky

[convolutional]
filters=4
size=1
stride=1
pad=1
activation=linear

[avgpool]

[softmax]
groups=1
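
For debugging I also tried this small sketch (my own idea, not from any tutorial): it prints how readNetFromDarknet mapped each section of the cfg to an OpenCV dnn layer type, so the [shortcut] sections (which I believe become the Eltwise layers named in the assertion) can be matched against the cfg above:

std::vector<String> layerNames = clasificacion.getLayerNames();
for (size_t i = 0; i < layerNames.size(); ++i)
{
    // Look up each imported layer by name and print its OpenCV layer type
    Ptr<Layer> layer = clasificacion.getLayer(clasificacion.getLayerId(layerNames[i]));
    std::cout << layerNames[i] << " -> " << layer->type << std::endl;
}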