How to fix 'only black frames receiving' in Android with OpenCV

I was developing an augmented-reality feature, similar to inkHunter, for a mobile application using Python and OpenCV. The code worked as I expected, even though it had some overkill in it. I then needed to make an Android app, and since the processing is real-time I knew I had to convert the Python code to C++ and run it on Android with the NDK. I was able to load the OpenCV libraries into my Android project and pass data between the native class and the MainActivity. I then converted my Python code to C++ (which I'm not very familiar with) and ran the project, but it gives me only black frames. The program shows no errors; I just don't get the expected output.

I'm using Android Studio 3.3.2 and OpenCV4Android 4.1.0.

I used the matchTemplate method to detect the input template in the captured frame, then pasted a PNG onto the detected area using alpha blending, and finally added that area back to the frame using homography.
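The per-pixel operation this builds up to is out = alpha * foreground + (1 - alpha) * background. As a minimal sketch in OpenCV terms (assuming foreground, background, and alpha are all CV_32FC3 Mats of the same size, with alpha already scaled to [0, 1]):

Mat oneMinusAlpha;
subtract(Scalar::all(1.0), alpha, oneMinusAlpha);  // (1 - alpha), per channel
Mat blended = foreground.mul(alpha) + background.mul(oneMinusAlpha);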

This is my code:

MainActivity.java

public class MainActivity extends AppCompatActivity implements CameraBridgeViewBase.CvCameraViewListener2 {



    private static String TAG = "MainActivity";
    private JavaCameraView javaCameraView;

    // Used to load the 'native-lib' library on application startup.
    static {
        System.loadLibrary("native-lib");
        System.loadLibrary("opencv_java4");
    }

    private Mat mRgba;



    BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
        @Override
        public void onManagerConnected(int status) {
            switch(status){
                case BaseLoaderCallback.SUCCESS:{
                    javaCameraView.enableView();
                    break;
                }
                default:{
                    super.onManagerConnected(status);
                    break;
                }
            }
        }
    };

    private Mat temp, tattoo;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        javaCameraView = (JavaCameraView)findViewById(R.id.java_camera_view);
        javaCameraView.setVisibility(SurfaceView.VISIBLE);
        javaCameraView.setCvCameraViewListener(this);

        AssetManager assetManager = getAssets();

        try {
            InputStream is = assetManager.open("temp.jpg");
            Bitmap bitmap = BitmapFactory.decodeStream(is);
            Bitmap bmp32 = bitmap.copy(Bitmap.Config.ARGB_8888, true);
            temp = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8UC4);
            Utils.bitmapToMat(bmp32, temp);

        } catch (IOException e) {
            e.printStackTrace();
        }

        try {
            InputStream isTattoo = assetManager.open("tattoo2.png");
            Bitmap bitmapTattoo = BitmapFactory.decodeStream(isTattoo);
            Bitmap bmp32Tattoo = bitmapTattoo.copy(Bitmap.Config.ARGB_8888, true);
            tattoo = new Mat(bitmapTattoo.getHeight(), bitmapTattoo.getWidth(), CvType.CV_8UC4);
            Utils.bitmapToMat(bmp32Tattoo, tattoo);

        } catch (IOException e) {
            e.printStackTrace();
        }




    }

    @Override
    protected void onPause(){
        super.onPause();
        if(javaCameraView != null){
            javaCameraView.disableView();
        }
    }

    @Override
    protected void onDestroy(){
        super.onDestroy();
        if(javaCameraView != null){
            javaCameraView.disableView();
        }
    }

    @Override
    protected void onResume(){
        super.onResume();
        if(OpenCVLoader.initDebug()){
            Log.i(TAG, "OpenCV Loaded successfully ! ");
            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
        }else{
            Log.i(TAG, "OpenCV not loaded ! ");
            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, mLoaderCallback);
        }
    }

    @Override
    public void onCameraViewStarted(int width, int height) {
        mRgba = new Mat(height, width, CvType.CV_8UC4);

    }

    @Override
    public void onCameraViewStopped() {
        mRgba.release();

    }

    @Override
    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
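        // The Mat returned from this callback is what JavaCameraView renders,
        // so the native code must leave a valid RGBA frame at this address.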
        mRgba = inputFrame.rgba();

        augmentation(mRgba.getNativeObjAddr(), temp.getNativeObjAddr(), tattoo.getNativeObjAddr());

        return mRgba;
    }


    public native void augmentation(long matAddrRgba, long tempC, long tattooDesign);
}

native-lib.cpp

#include <jni.h>
#include <string>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp> // findHomography
#include <iostream>

using namespace cv;
using namespace std;

extern "C" {



// Alpha Blending using direct pointer access
Mat& alphaBlendDirectAccess(Mat& alpha, Mat& foreground, Mat& background, Mat& outImage)
{

    int numberOfPixels = foreground.rows * foreground.cols * foreground.channels();

    float* fptr = reinterpret_cast<float*>(foreground.data);
    float* bptr = reinterpret_cast<float*>(background.data);
    float* aptr = reinterpret_cast<float*>(alpha.data);
    float* outImagePtr = reinterpret_cast<float*>(outImage.data);

    int i,j;
    for ( j = 0; j < numberOfPixels; ++j, outImagePtr++, fptr++, aptr++, bptr++)
    {
        *outImagePtr = (*fptr)*(*aptr) + (*bptr)*(1 - *aptr);
    }

    return outImage;
}
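// Note: the raw float* walk above assumes all four Mats are continuous,
// CV_32FC3, and exactly the same size; any mismatch in size or channel
// count makes the pointers run past the buffers.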


Mat& alphaBlend(Mat& foreg, Mat& backgg)
{

    // Read background image
    Mat background = backgg;// cropped frame
    Size sizeBackground = background.size();

    // Read in the png foreground asset file that contains both rgb and alpha information
    // Mat foreGroundImage = imread("foreGroundAssetLarge.png", -1); //resized tattoo
    Mat foreGroundImage = foreg;
    // resize the foreGroundImage to background image size
    resize(foreGroundImage, foreGroundImage, Size(sizeBackground.width,sizeBackground.height));
    Mat bgra[4];
    split(foreGroundImage, bgra);//split png foreground

    // Save the foregroung RGB content into a single Mat
    vector<Mat> foregroundChannels;
    foregroundChannels.push_back(bgra[0]);
    foregroundChannels.push_back(bgra[1]);
    foregroundChannels.push_back(bgra[2]);
    Mat foreground = Mat::zeros(foreGroundImage.size(), CV_8UC3);
    merge(foregroundChannels, foreground);

    // Save the alpha information into a single Mat
    vector<Mat> alphaChannels;
    alphaChannels.push_back(bgra[3]);
    alphaChannels.push_back(bgra[3]);
    alphaChannels.push_back(bgra[3]);
    Mat alpha = Mat::zeros(foreGroundImage.size(), CV_8UC3);
    merge(alphaChannels, alpha);



    // Convert Mat to float data type
    foreground.convertTo(foreground, CV_32FC3);
    background.convertTo(background, CV_32FC3);
    alpha.convertTo(alpha, CV_32FC3, 1.0/255); // keeps the alpha values betwen 0 and 1

    // Number of iterations to average the performane over
    int numOfIterations = 1; //1000;



    // Alpha blending using direct Mat access with for loop
    Mat outImage = Mat::zeros(foreground.size(), foreground.type());

    for (int i=0; i<numOfIterations; i++) {
        outImage = alphaBlendDirectAccess(alpha, foreground, background, outImage);
    }

    imshow("alpha blended image", outImage/255);
    outImage = outImage/255;
    outImage.convertTo(outImage, CV_8U); // Convert float to Mat data type

    return outImage;
}




Mat& applyHomography(Mat& convertedOutImage, Mat& initialFrame, int startX, int startY, int endX, int endY)
{

    struct userdata{
        Mat im;
        vector<Point2f> points;
    };

    // Read in the image.
    Mat im_src = convertedOutImage;
    Size size = im_src.size();

    // Create a vector of points.
    vector<Point2f> pts_src;
    pts_src.push_back(Point2f(0,0));
    pts_src.push_back(Point2f(size.width - 1, 0));
    pts_src.push_back(Point2f(size.width - 1, size.height -1));
    pts_src.push_back(Point2f(0, size.height - 1 ));



    // Destination image
    Mat im_dst = initialFrame;
    vector<Point2f> pts_dst;
    pts_dst.push_back(Point2f(startX, startY));
    pts_dst.push_back(Point2f(endX, startY));
    pts_dst.push_back(Point2f(endX, endY));
    pts_dst.push_back(Point2f(startX, endY));


    Mat im_temp = im_dst.clone();


    // Calculate Homography between source and destination points
    Mat h = findHomography(pts_src, pts_dst);

    // Warp source image
    warpPerspective(im_src, im_temp, h, im_dst.size());


    // Black out polygonal area in destination image.
    fillConvexPoly(im_dst, pts_dst, Scalar(0), LINE_AA);

    // Add warped source image to destination image.
    im_dst = im_dst + im_temp;



    return im_dst;
}


extern "C" JNIEXPORT void JNICALL
Java_com_example_inkmastertest_MainActivity_augmentation(JNIEnv *env, jobject, jlong addrRgba, jlong tempC, jlong tattooDesign) {

    Mat& img = *(Mat*)addrRgba;
    Mat target_img = img.clone();

    Mat& template1 = *(Mat*)tempC;
    Mat& tattooDes = *(Mat*)tattooDesign;


    // Contains the description of the match
    struct Match_desc{
        bool init;
        double maxVal;
        Point maxLoc;
        double scale;
        // Initialise every member; scale was previously left indeterminate
        Match_desc(): init(false), maxVal(0), scale(1) {}
    };

    Mat template_mat;
    template_mat = template1; // Read image
    // Mats from Utils.bitmapToMat are RGBA; COLOR_BGR2GRAY expects 3 channels
    cvtColor(template_mat, template_mat, COLOR_RGBA2GRAY); // Convert to gray
    Canny(template_mat, template_mat, 50, 50*4); // Find edges


    // Find size
    int tW, tH;
    tW = template_mat.cols;
    tH = template_mat.rows;



    Mat target_gray, target_resized, target_edged;

    cvtColor(target_img, target_gray, COLOR_RGBA2GRAY); // camera frames are RGBA, not BGR

    const float SCALE_START = 1;
    const float SCALE_END = 0.2;
    const int SCALE_POINTS = 20;

    Match_desc found;
    for(float scale = SCALE_START; scale >= SCALE_END; scale -= (SCALE_START - SCALE_END)/SCALE_POINTS){
        resize(target_gray, target_resized, Size(0,0), scale, scale);// Resize

        // Break if target image becomes smaller than template
        if(tW > target_resized.cols || tH > target_resized.rows) break;


        Canny(target_resized, target_edged, 50, 50*4); // Find edges

        // Match template
        Mat result;
        matchTemplate(target_edged, template_mat, result, TM_CCOEFF);

        double maxVal; Point maxLoc;
        minMaxLoc(result, NULL, &maxVal, NULL, &maxLoc);

        // If better match found
        if( found.init == false || maxVal > found.maxVal ){
            found.init = true;
            found.maxVal = maxVal;
            found.maxLoc = maxLoc;
            found.scale = scale;
        }


    }

    // If no scale produced a match, leave the frame untouched
    if (!found.init) return;

    int startX, startY, endX, endY;
    startX = found.maxLoc.x / found.scale;
    startY = found.maxLoc.y / found.scale;

    endX = (found.maxLoc.x + tW) / found.scale;
    endY = (found.maxLoc.y + tH) / found.scale;

    // Draw a bounding box around the detected result
    // (the frame is RGBA, so red is Scalar(255, 0, 0, 255) rather than BGR order)
    rectangle(target_img, Point(startX, startY), Point(endX, endY), Scalar(255, 0, 0, 255), 3);


    // Rect takes (x, y, width, height), not two corner points
    Rect myROI(startX, startY, endX - startX, endY - startY);
    myROI &= Rect(0, 0, target_img.cols, target_img.rows); // clamp to the frame
    Mat cropped = target_img(myROI);

    Mat alphaBlended = alphaBlend(tattooDes, cropped);
    Mat homographyApplied = applyHomography(alphaBlended, target_img, startX, startY, endX, endY);

    // Copy the result into the Mat the Java side renders, instead of swapping headers
    homographyApplied.copyTo(img);


}



It would be better if I could skip the homography step, but I don't know how to alpha blend two images of different sizes. My expected output is the input PNG (tattoo2.png) shown on the detected template area. I would be most grateful if you could help me with this. Please let me know if I need to provide anything else.
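For what it's worth, here is a rough sketch of what I imagine the homography-free version could look like: resize the PNG to the detected region and blend it only inside that ROI, so the two sizes never have to match globally. This is untested and assumes the detected region is axis-aligned, frame is the CV_8UC4 camera frame, and tattoo is the CV_8UC4 PNG with its alpha channel intact (the names are just placeholders):

// Blend the PNG into the detected region of the frame, skipping homography.
Rect region(startX, startY, endX - startX, endY - startY);
region &= Rect(0, 0, frame.cols, frame.rows);      // clamp to frame bounds

Mat roi = frame(region);                           // a view into the frame, no copy
Mat overlay;
resize(tattoo, overlay, region.size());            // match the ROI size

vector<Mat> ch;
split(overlay, ch);                                // R, G, B, A
Mat alpha;
ch[3].convertTo(alpha, CV_32F, 1.0 / 255);         // alpha in [0, 1]

for (int y = 0; y < roi.rows; ++y) {
    for (int x = 0; x < roi.cols; ++x) {
        float a = alpha.at<float>(y, x);
        Vec4b& dst = roi.at<Vec4b>(y, x);
        const Vec4b& src = overlay.at<Vec4b>(y, x);
        for (int c = 0; c < 3; ++c)                // leave the frame's alpha alone
            dst[c] = saturate_cast<uchar>(a * src[c] + (1 - a) * dst[c]);
    }
}

Because roi is just a header over frame's data, writing into it updates the frame in place. Is something like this a reasonable approach?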