Large difference in TensorFlow object-detection results between PC and Android
System information: OpenCV 3.4.1; Operating System / Platform: Ubuntu 14.04, Android Studio
Detailed description: I used the TensorFlow Object Detection API (https://github.com/tensorfl... to train a detector, but when I test the model on a PC and on my phone I see something strange. On the PC the model detects the object accurately, but on the phone the result is quite different, even though the code and the models are exactly the same.
PC Code:
``` void TestDetector_Image() {
// load net
Net net = dnn::readNetFromTensorflow("Test.pb",
"Test.pbtxt");
// input network
Mat srcImage=imread("1.jpg");
Mat inputBlob = blobFromImage(srcImage, 1 / 127.5f,
Size(512, 512),
Scalar(127.5, 127.5, 127.5),
true, false);
net.setInput(inputBlob);
// forward
Mat detection = net.forward();
// get results
Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
vector<Rect> detectedRects;
for (int i = 0; i < detectionMat.rows; i++)
{
float confidence = detectionMat.at<float>(i, 2);
if (confidence > 0.5)
{
int objectClass = (int)(detectionMat.at<float>(i, 1));
int left = static_cast<int>(detectionMat.at<float>(i, 3) * srcImage.cols);
int top = static_cast<int>(detectionMat.at<float>(i, 4) * srcImage.rows);
int right = static_cast<int>(detectionMat.at<float>(i, 5) * srcImage.cols);
int bottom = static_cast<int>(detectionMat.at<float>(i, 6) * srcImage.rows);
Rect box(Point(left, top), Point(right, bottom));
detectedRects.push_back(box);
}
}
// print results
for(int i=0;i<detectedRects.size();++i)
{
cv::rectangle(srcImage,detectedRects[i],Scalar(0,255,255),2);
printf("%d: %d %d %d %d\n",i,detectedRects[i].x,detectedRects[i].y,detectedRects[i].width,detectedRects[i].height);
}
imwrite("Result.jpg",srcImage);
}
```
Android Code:
``` java code
/**
 * Java-side wrapper around a native object detector. Owns a native peer
 * (nativeHandler) allocated in the constructor and released by close().
 */
public class Detector implements AutoCloseable {
// TAG was referenced below but never declared; define it so the class compiles.
private static final String TAG = "Detector";
private static boolean libraryFound = false;
// Used to load the 'native-lib' library on application startup.
static {
try {
System.loadLibrary("native-lib");
libraryFound = true;
} catch (UnsatisfiedLinkError error) {
error.printStackTrace();
Log.e(TAG, "libnative-lib.so not found");
}
}
// Guards nativeHandler against concurrent close()/detect()/init().
private final Object lockObj = new Object();
/** Allocates the native peer; throws if allocation fails. */
public Detector() {
if(!libraryFound) {
return;
}
allocate();
if(nativeHandler == 0) throw new RuntimeException("Detector allocate error.");
}
/** Releases the native peer. Idempotent: safe to call more than once. */
@Override
public void close() throws Exception {
synchronized (lockObj) {
if(nativeHandler == 0) return;
deallocate();
nativeHandler = 0;
}
}
/**
 * Initializes the native detector from app assets.
 * @return native status code, or -1 if the native peer is unavailable.
 */
public int init(AssetManager assetManager) {
synchronized (lockObj) {
// Don't call into native code if the library failed to load or the
// detector has already been closed — nativeHandler is 0 in both cases.
if(nativeHandler == 0) return -1;
return nativeInit(assetManager);
}
}
/**
 * Runs detection on an ARGB_8888 bitmap.
 * @return the first detected box, or null on bad input / closed detector.
 */
public Rect detect(Bitmap bitmap) {
if(bitmap == null) return null;
if(bitmap.getConfig() != Bitmap.Config.ARGB_8888) return null;
synchronized (lockObj) {
// Prevent a use-after-free race with close() running on another thread.
if(nativeHandler == 0) return null;
return nativeDetect(bitmap);
}
}
////////////////////////////// Native Code //////////////////////////
private long nativeHandler;
private native void allocate();
private native void deallocate();
private native int nativeInit(AssetManager assetManager);
private native Rect nativeDetect(Bitmap bitmap);
}
jni code called by detect() in java code JNIEXPORT jobject JNICALL DETECTOR_METHOD(nativeDetect)(JNIEnv *env, jobject instance, jobject bitmap) {
// Wraps the Java Bitmap's pixel buffer in a cv::Mat, runs the native
// detector, and converts the first result into an android.graphics.Rect.
AndroidBitmapInfo info;
void* pixels = 0;
int ret;
// Query the bitmap's dimensions and pixel format; give up on failure.
if((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
LOG_E("AndroidBitmap_getInfo() failed.");
return NULL;
}
// The Java side already filters for ARGB_8888, but re-check defensively.
if(info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
LOG_W("Bitmap format is not RGBA_8888");
return NULL;
}
// Lock the bitmap so 'pixels' points at its backing store.
if((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
LOG_W("AndroidBitmap_lockPixels() failed.");
return NULL;
}
// NOTE(review): this Mat wraps 'pixels' WITHOUT copying, yet the pixels
// are unlocked on the very next line before cvtColor reads them — the
// buffer may no longer be valid. Either clone() the Mat before unlocking,
// or move the unlock after all use of 'img'.
cv::Mat img(info.height, info.width, CV_8UC4, pixels);
AndroidBitmap_unlockPixels(env, bitmap);
// NOTE(review): RGBA -> BGR here only reorders/drops alpha. The PC path
// passes swapRB=true to blobFromImage; unless detect() swaps R/B
// internally, the channel order fed to the network differs between PC
// and Android — a likely cause of the mismatched results (this is what
// the reply below points out). TODO confirm what detect() expects.
cv::cvtColor(img, img, cv::COLOR_RGBA2BGR);
std::vector<Box> boxs;
// Run the detector owned by this Java instance; results land in 'boxs'.
get_detector(env, instance)->detect(img, boxs);
if(boxs.size() < 1)
return NULL;
// Build an android.graphics.Rect(left, top, right, bottom) for the result.
jclass rectClazz = env->FindClass("android/graphics/Rect");
jmethodID rectConstructorMethod = env->GetMethodID(rectClazz, "<init>", "(IIII)V");
// (snippet truncated in the original post)
int left = boxs ...
Your PC code swaps the B and R channels in blobFromImage() (swapRB=true); is your Android code doing the same? (You don't show that part.)