
How to use videostab.cpp in a Swift program

asked 2016-01-05 10:32:23 -0600

drPatience

I have successfully installed OpenCV in my Swift project, using a bridging header and an Objective-C wrapper to convert an image's color space from RGB to grayscale. Now I'm attempting to implement video stabilisation in my project using link:videostab. I understand the code to an extent, but I need to create an Objective-C wrapper for it as well. Has anyone done this before?


1 answer


answered 2016-01-11 12:56:34 -0600

Below is a copy of the code I have in my wrapper, following the same approach as the code in your link.

You'll need to change the extension of your Obj-C wrapper to .mm.
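For reference, the wrapper's header has to stay plain Objective-C (no C++ types) so Swift can see it through the bridging header. A minimal sketch matching the method below (the file names, and inputURL/resultURL/statusLabel in the Swift line, are just placeholders):

OpenCVWrapper.h

#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

// Plain Objective-C interface -- safe to expose to Swift.
@interface OpenCVWrapper : NSObject

+ (NSURL *)processVideoFileWithOpenCV:(NSURL *)url
                                     :(NSURL *)result
                                     :(UIImageView *)imageView
                                     :(UILabel *)label;

@end

Your bridging header then only needs:

#import "OpenCVWrapper.h"

and Swift picks the method up with unlabeled arguments (the exact imported name can vary by Swift version):

let output = OpenCVWrapper.processVideoFileWithOpenCV(inputURL, resultURL, imageView, statusLabel)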

The following works for me:

OpenCVWrapper.mm

#include "OpenCVWrapper.h"
#import "UIImage+OpenCV.h"

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

const int SMOOTHING_RADIUS = 30; // in frames; the larger, the more stable the video, but the less reactive to sudden panning

struct TransformParam {
    TransformParam() {}
    TransformParam(double _dx, double _dy, double _da) {
        dx = _dx;
        dy = _dy;
        da = _da;
    }

    double dx, dy, da; // frame-to-frame translation (dx, dy) and rotation (da)
};

struct Trajectory {
    Trajectory() {}
    Trajectory(double _x, double _y, double _a) {
        x = _x;
        y = _y;
        a = _a;
    }

    double x, y, a; // accumulated position and angle of the camera path
};
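
// TransformParam holds the per-frame motion deltas; Trajectory holds their
// running sum, i.e. the camera path that gets smoothed in Step 3 below.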

@implementation OpenCVWrapper

+ (NSURL *)processVideoFileWithOpenCV:(NSURL*)url : (NSURL*)result : (UIImageView*)imageView : (UILabel*)label {

    dispatch_async(dispatch_get_main_queue(), ^{
        label.text = [NSString stringWithCString:"Calculating Stabilisation..." encoding:[NSString defaultCStringEncoding]];
    });

    String file(url.path.UTF8String);
    String resultFile(result.path.UTF8String);

    VideoCapture cap(file);

    assert(cap.isOpened());

    Mat cur, cur_grey, cur_orig;
    Mat prev, prev_grey, prev_orig;

    cap >> prev;
    cvtColor(prev, prev_grey, COLOR_BGR2GRAY);

    // Step 1 - Get previous to current frame transformation (dx, dy, da) for all frames
    vector <TransformParam> prev_to_cur_transform; // previous to current

    int frames=1;
    int max_frames = cap.get(CV_CAP_PROP_FRAME_COUNT);
    cout << max_frames;
    Mat last_T;

    while(true) {
        cap >> cur;

        if(cur.data == NULL) {
            break;
        }

        cvtColor(cur, cur_grey, COLOR_BGR2GRAY);

        // vector from prev to cur
        vector <Point2f> prev_corner, cur_corner;
        vector <Point2f> prev_corner2, cur_corner2;
        vector <uchar> status;
        vector <float> err;

        goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
        calcOpticalFlowPyrLK(prev_grey, cur_grey, prev_corner, cur_corner, status, err);
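        // at this point cur_corner holds each corner's tracked position in the
        // current frame, and status[i] is 1 only where the LK tracker succeeded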

        // weed out bad matches
        for(size_t i=0; i < status.size(); i++) {
            if(status[i]) {
                prev_corner2.push_back(prev_corner[i]);
                cur_corner2.push_back(cur_corner[i]);

                cv::line(cur, prev_corner[i], cur_corner[i], CV_RGB(255,0,0), 1, CV_AA); // DEBUGGING ONLY
            }
        }

        dispatch_async(dispatch_get_main_queue(), ^{
            [imageView setImage:[UIImage imageWithCVMat:cur]];
        });

        // translation + rotation only, estimated from the filtered matches
        Mat T = estimateRigidTransform(prev_corner2, cur_corner2, false); // false = rigid transform, no scaling/shearing

        // in rare cases no transform is found. We'll just use the last known good transform.
        if(T.data == NULL) {
            last_T.copyTo(T);
        }

        T.copyTo(last_T);

        // decompose T
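        // For a rigid transform, T = [cos(da) -sin(da) dx; sin(da) cos(da) dy]:
        // the translation is the last column, and the angle falls out of atan2
        // on the first column of the rotation part.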
        double dx = T.at<double>(0,2);
        double dy = T.at<double>(1,2);
        double da = atan2(T.at<double>(1,0), T.at<double>(0,0));

        prev_to_cur_transform.push_back(TransformParam(dx, dy, da));

        cur.copyTo(prev);
        cur_grey.copyTo(prev_grey);

        frames++;
    }

    // Step 2 - Accumulate the transformations to get the image trajectory

    // Accumulated frame to frame transform
    double a = 0;
    double x = 0;
    double y = 0;

    vector <Trajectory> trajectory; // trajectory at all frames

    for(size_t i=0; i < prev_to_cur_transform.size(); i++) {
        x += prev_to_cur_transform[i].dx;
        y += prev_to_cur_transform[i].dy;
        a += prev_to_cur_transform[i].da;

        trajectory.push_back(Trajectory(x,y,a));
    }

    // Step 3 - Smooth out the trajectory using an averaging window
    vector <Trajectory> smoothed_trajectory; // trajectory at all frames

    for(size_t i=0; i < trajectory.size(); i++) {
        double sum_x = 0;
        double sum_y = 0;
        double sum_a = 0;
        int count = 0;

        for(int j=-SMOOTHING_RADIUS; j <= SMOOTHING_RADIUS; j++) {
            if(i+j >= 0 && i+j < trajectory.size()) {
                sum_x += trajectory[i+j].x;
                sum_y += trajectory[i+j].y;
                sum_a += trajectory[i+j].a;
(more)
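The snippet above is truncated on the original page; in the linked code, the smoothing loop finishes by averaging each window, along these lines (a sketch under that assumption, not the verbatim tail of the post):

                count++;
            }
        }

        // the mean over the window is the smoothed trajectory point
        smoothed_trajectory.push_back(Trajectory(sum_x/count, sum_y/count, sum_a/count));
    }

The remaining steps in the link then turn the smoothed trajectory back into per-frame transforms, warp each frame with them, and write the output (here presumably to resultFile via a VideoWriter).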
