# Mat rotation gets wrong result

I want to rotate an image by 90 degrees. My code is as follows:

// Rotate an image by exactly 90 degrees.
// direction > 0 : counter-clockwise (+90), otherwise clockwise (-90).
// The destination gets swapped dimensions (height x width of the source).
void rotate90(Mat & src, Mat & dst, int direction) {
    int src_width = src.cols, src_height = src.rows;
    cout << src_width << " " << src_height << endl;

    // Rotate about the source center ...
    Point2f center(src_width / 2.0f, src_height / 2.0f);
    double angle = (direction > 0) ? 90.0 : -90.0;

    Mat rot_mat = getRotationMatrix2D(center, angle, 1);

    // ... but the destination canvas is src_height x src_width, so its
    // center differs from the source center. Without this translation the
    // rotated content lands partly outside the canvas (the bug in the
    // original code): shift from the src center to the dst center.
    rot_mat.at<double>(0, 2) += (src_height - src_width) / 2.0;
    rot_mat.at<double>(1, 2) += (src_width - src_height) / 2.0;

    warpAffine(src, dst, rot_mat, Size(src_height, src_width));
}

int main(int argc, const char *argv[])
{

resize(img, img, Size(1024, 683));
rotate90(img, img, 1);

imwrite("/Users/chuanliu/Desktop/roatation.jpg",img);

return 0;
}


But the result is as follows:
Before rotation:

After rotation:

It seems that something is wrong with the center of rotation, but I don't think I set a wrong center. Can anyone tell me what is wrong?

edit retag close merge delete

1

Hmm, isn't the center point determined in dst coordinates? You also flip w/h there. If I rotate it around the src center, I get exactly your result.

( 2015-04-29 09:06:26 -0500 )edit

So could you tell me the right way to do this? Which point should I choose to be the rotation center?

( 2015-04-29 20:19:07 -0500 )edit

Sort by » oldest newest most voted

EDIT: adapted my own code a bit to get the desired result and make it more clear!

This is the code that I am using for rotating images around their center point

// Return the rotation matrices for each rotation
// Rotate `src` about its own center by `angle` degrees into `dst`.
// The destination keeps the source dimensions, so content that leaves
// the frame (e.g. corners at non-multiples of 90) is clipped.
void rotate(Mat& src, double angle, Mat& dst)
{
    const Point2f center(src.cols / 2., src.rows / 2.);
    const Mat rotation = getRotationMatrix2D(center, angle, 1.0);
    warpAffine(src, dst, rotation, cv::Size(src.cols, src.rows));
}


And then in code you simply do something like this code snippet (tested with latest 2.4 OpenCV branch)

#include <iostream>
#include "opencv2/opencv.hpp"

using namespace std;
using namespace cv;

// Return the rotation matrices for each rotation
// Rotate the source image about its midpoint by `angle` degrees.
// Output size equals input size, so off-frame pixels are cut off.
void rotate(Mat& src, double angle, Mat& dst)
{
    double cx = src.cols / 2., cy = src.rows / 2.;
    Mat rot = getRotationMatrix2D(Point2f(cx, cy), angle, 1.0);
    warpAffine(src, dst, rot, cv::Size(src.cols, src.rows));
}

int main()
{
imshow("input", input);

// Make larger image
int rows = input.rows;
int cols = input.cols;
int largest = 0;
if ( rows > cols ){
largest = rows;
}else{
largest = cols;
}
Mat temp = Mat::zeros(largest, largest, CV_8UC3);

// First define the roi in the large image --> draw this on a paper to make it clear
// There are two possible cases
Rect roi;
if (input.rows > input.cols){
roi = Rect((temp.cols - input.cols)/2, 0, input.cols, input.rows);
}
if (input.cols > input.rows){
roi = Rect(0, (temp.rows - input.rows)/2, input.cols, input.rows);
}

// Copy the original to the black large temp image
input.copyTo(temp(roi));

// Rotate the image
Mat rotated = temp.clone();
rotate(temp, 90, rotated);

imshow("rotated", rotated);

// Now cut it out again
Mat result = rotated(Rect(roi.y, roi.x, roi.height, roi.width)).clone();

imshow("result", result);

waitKey(0);
return 0;
}


Which results into the following images in order [original - rotated - cropped result]

more

Just another solution. You can specify an arbitrary angle but not the center point of the rotation. The output size of the image is automatically calculated.

#include <iostream>
#include "opencv2/opencv.hpp"

// Return the rotation matrices for each rotation
// Rotate `src` by `angle` degrees into `dst`, automatically sizing the
// output so the entire rotated image is kept (no corner is clipped).
// The rotation center cannot be chosen: the matrix rotates about the
// origin and the result is then translated onto its own bounding box.
void rotate(cv::Mat& src, double angle, cv::Mat& dst) {
// 2x3 rotation about the origin; the center is irrelevant because the
// final translation (H2 below) re-positions the result anyway.
cv::Mat r = getRotationMatrix2D(cv::Point2f(), angle, 1.0);

//4 coordinates of the image
std::vector<cv::Point2f> corners(4);
corners[0] = cv::Point2f(0, 0);
corners[1] = cv::Point2f(0, src.rows);
corners[2] = cv::Point2f(src.cols, 0);
corners[3] = cv::Point2f(src.cols, src.rows);

// Where the four corners land after the rotation.
std::vector<cv::Point2f> cornersTransform(4);
cv::transform(corners, cornersTransform, r);

//Copy the 2x3 transformation matrix into a 3x3 transformation matrix
// (homogeneous form) so it can be composed with a translation and fed
// to warpPerspective.
cv::Mat H = cv::Mat::eye(3, 3, CV_64F);
for(int i = 0; i < 2; i++) {
for(int j = 0; j < 3; j++) {
H.at<double>(i, j) = r.at<double>(i, j);
}
}

// offsetX/offsetY collect the most negative transformed coordinates
// (how far the rotated image sticks out left/above the origin);
// maxX/maxY collect the largest coordinates, used to size the canvas.
double offsetX = 0.0, offsetY = 0.0, maxX = 0.0, maxY = 0.0;
//Get max offset outside of the image and max width / height
for(size_t i = 0; i < 4; i++) {
if(cornersTransform[i].x < offsetX) {
offsetX = cornersTransform[i].x;
}

if(cornersTransform[i].y < offsetY) {
offsetY = cornersTransform[i].y;
}

if(cornersTransform[i].x > maxX) {
maxX = cornersTransform[i].x;
}

if(cornersTransform[i].y > maxY) {
maxY = cornersTransform[i].y;
}
}

// Flip the offsets to positive shifts and grow the extent so the whole
// rotated bounding box fits in positive coordinates.
offsetX = -offsetX;
offsetY = -offsetY;
maxX += offsetX;
maxY += offsetY;

// Output canvas = bounding box of the rotated corners (truncated to int).
cv::Size size_warp(maxX, maxY);

//Create the transformation matrix to be able to have all the pixels
// H2 is a pure translation; the composition H2*H first rotates, then
// shifts the result fully into view. The order matters.
cv::Mat H2 = cv::Mat::eye(3, 3, CV_64F);
H2.at<double>(0,2) = offsetX;
H2.at<double>(1,2) = offsetY;

warpPerspective(src, dst, H2*H, size_warp);
}

int main() {
    cv::Mat input;
    // Open the default camera. The original snippet used `capture`
    // without declaring it, so it could not compile as posted.
    cv::VideoCapture capture(0);
    if(!capture.isOpened()) {
        return -1;
    }
    // Grab one frame to rotate.
    capture >> input;
    if(input.empty()) {
        return -1;
    }

    // Halve the frame so both windows fit on screen.
    cv::resize(input, input, cv::Size(), 0.5, 0.5);
    imshow("input", input);

    // Sweep a full turn, one degree per iteration; ESC (27) exits early.
    cv::Mat rotated;
    for(double angle = 0; angle < 360; angle += 1.0) {
        rotate(input, angle, rotated);
        cv::imshow("rotated", rotated);
        char c = cv::waitKey(30);
        if(c == 27) {
            break;
        }
    }

    return 0;
}

more

Official site

GitHub

Wiki

Documentation