OpenCV DNN YOLOv3-tiny does not produce the same results as Darknet YOLOv3-tiny
Hello everybody! First, I trained on my training dataset with the original Darknet YOLOv3-tiny on Ubuntu. Second, I tested my validation dataset with the pretrained model, and it predicts very good results. But when I use the OpenCV DNN YOLO sample (like https://github.com/opencv/opencv/blob...), I cannot reproduce the same results as Darknet — this is where I am stuck. Can anyone explain the reason and help me solve the problem? (Note: a common cause is preprocessing — Darknet letterboxes the input, while `blobFromImage` does a plain resize.)
Here is my OpenCV DNN YOLO code:
#include <fstream>
#include <sstream>
#include <iostream>
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace std;
using namespace dnn;
// Detection-filtering thresholds shared between main() and postprocess():
// confThreshold discards low-confidence detections; nmsThreshold is the
// overlap threshold for non-maximum suppression. Both are assigned in main().
float confThreshold, nmsThreshold;
// Class names loaded from the .names file in main(); the vector index
// corresponds to the class id produced by the network.
std::vector<std::string> classes;
// Forward declarations — definitions appear later in the file.
// postprocess: filters raw network outputs and draws the surviving boxes on frame.
void postprocess(Mat& frame, const std::vector<Mat>& out, Net& net);
// drawPred: renders one labeled bounding box onto frame.
void drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat& frame);
// getOutputsNames: returns the names of the network's unconnected output layers.
std::vector<String> getOutputsNames(const Net& net);
int main(int argc, char** argv)
{
confThreshold = 0.8;
nmsThreshold = 0.4;
double scale = 1.0/255;
//int scale = 1;
Scalar mean = Scalar(0,0,0);
bool swapRB = true;
int inpWidth = 416;
int inpHeight = 416;
std::string modelPath = "yolov3-tiny_111000.weights";
std::string configPath = "tiny.cfg";
// Open file with classes names.
if (1)
{
std::string file = "target.names";
std::ifstream ifs(file.c_str());
if (!ifs.is_open())
CV_Error(Error::StsError, "File " + file + " not found");
std::string line;
while (std::getline(ifs, line))
{
classes.push_back(line);
}
}
// Load a model.
Net net = readNetFromDarknet( configPath, modelPath);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_CPU);
std::vector<String> outNames = net.getUnconnectedOutLayersNames();
// Create a window
static const std::string kWinName = "Deep learning object detection in OpenCV";
namedWindow(kWinName, WINDOW_NORMAL);
// Open a video file or an image file or a camera stream.
VideoCapture cap;
cap.open("1.mp4");
// Process frames.
Mat frame, blob;
int index = 0;
while (waitKey(1) < 0)
{
cap >> frame;
index++;
if (index % 10 != 0)
continue;
if (frame.empty())
{
waitKey();
break;
}
// Create a 4D blob from a frame.
Size inpSize(inpWidth > 0 ? inpWidth : frame.cols,
inpHeight > 0 ? inpHeight : frame.rows);
blobFromImage(frame, blob, scale, inpSize, mean, swapRB, false, CV_32F);
cout << "blob" << endl;
cout << blob<< endl;
//blobFromImage(frame, blob, scale, inpSize, mean, swapRB, false, CV_8U);
// Run a model.
net.setInput(blob);
if (net.getLayer(0)->outputNameToIndex("im_info") != -1) // Faster-RCNN or R-FCN
{
resize(frame, frame, inpSize);
Mat imInfo = (Mat_<float>(1, 3) << inpSize.height, inpSize.width, 1.6f);
net.setInput(imInfo, "im_info");
}
std::vector<Mat> outs;
net.forward(outs, outNames);
postprocess(frame, outs, net);
// Put efficiency information.
std::vector<double> layersTimes;
double freq = getTickFrequency() / 1000;
double t = net.getPerfProfile(layersTimes) / freq;
std::string label = format("Inference time: %.2f ms", t);
putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));
imshow(kWinName, frame);
}
return 0;
}
void postprocess(Mat& frame, const std::vector<Mat>& outs, Net& net)
{
static std::vector<int> outLayers = net.getUnconnectedOutLayers();
static std::string outLayerType = net.getLayer(outLayers[0])->type;
std::vector<int> classIds;
std::vector<float> confidences;
std::vector<Rect> boxes;
if (outLayerType == "DetectionOutput")
{
// Network produces output blob with a shape 1x1xNx7 where N is a number of
// detections and an ...