Error: "unknown size" for arrays x, y
Hi guys,
I am getting an "unknown size" compile error for the arrays x and y — any suggestions?
// NOTE(review): the forum paste flattened every #include directive onto one
// line; each must sit on its own line or the preprocessor rejects the file.
// `using namespace cv;` at file scope is also discouraged.
#include <stdio.h> #include <iostream>#include <vector> #include <cmath> #include<opencv2/opencv.hpp> #include <opencv2/nonfree/features2d.hpp> #include<opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include "linefinder.h" #include "edgedetector.h"using namespace cv; int main(int argc, char** argv){
//-- CV Capture object for camera
// NOTE(review): CvCapture / cvCaptureFromFile / cvQueryFrame belong to the
// legacy OpenCV C API; prefer cv::VideoCapture and `cap >> frame`.
CvCapture* capture;
//-- Frame Captured from capture object
Mat frame;
Mat lastFrame;
//-- Start capture from default camera
// BUG(review): in the string literal, "\v" is an escape sequence (vertical
// tab), so the path passed to OpenCV is not ".\video1.avi". Use
// ".\\video1.avi" or the portable "./video1.avi".
capture = cvCaptureFromFile(".\video1.avi");
//-- If capture was successful
if (capture)
{
cv::namedWindow("Detected Lanes");
cvSetWindowProperty("Detected Lanes", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
// Number of past lines whose slope/coordinates are kept between frames.
// NOTE(review): this must be const/constexpr for the array sizes below to
// be legal standard C++.
int linesDetected = 20;
//-- While this program is running
//futline is used to store the slope of past lines in order to compare them between each other
// BUG(review): `linesDetected` is not a compile-time constant, so this is a
// variable-length array — a non-portable GCC-only extension and the likely
// cause of the reported "unknown size" error on other compilers (e.g. MSVC).
// Use std::vector<float>, or make `linesDetected` constexpr.
float futline[linesDetected];
//the x coordinates of stored lines
// BUG(review): same VLA problem — these are the `x` and `y` named in the
// "unknown size x,y" error message.
float x[2 * (linesDetected)];
//the y coordinates of stored lines
float y[2 * (linesDetected)];
std::vector<cv::Vec4i> lastLines;
int b = 0;
int frameCount = 0;
// Probabilistic line finder configured for short segments with small gaps.
LineFinder ld;
ld.setLineLengthAndGap(40, 5);
ld.setMinVote(10);
while (true)
{
// Legacy API: the returned frame buffer is owned by `capture`; do not free it.
frame = cvQueryFrame(capture);
if (!frame.empty()){
// Work on a half-resolution copy to reduce processing cost.
cv::Mat image;
cv::resize(frame, image, Size(), 0.5, 0.5, INTER_NEAREST);
cv::Mat currentFrame;
cv::resize(frame, currentFrame, Size(), 0.5, 0.5, INTER_NEAREST);
int height = image.size().height;
int width = image.size().width;
// ROI: lower half of the frame, skipping roughly the left sixth.
// NOTE(review): width*0.167 + width*0.833 can round past the image width,
// which would make the ROI invalid — verify on odd widths.
cv::Rect myROI(width*0.167, height / 2, width*.833, height / 2);
image = image(myROI);
if (!image.data)
return 0;
EdgeDetector ed;
ed.computeSobel(image);
// Canny edge map, used as input to both Hough transforms below.
cv::Mat contours;
cv::Canny(image, contours, 50, 200);
cv::Mat contoursInv;
cv::threshold(contours, contoursInv, 128, 255, cv::THRESH_BINARY_INV);
cv::imshow("Edges", contours);
// Standard Hough transform: each result is a (rho, theta) pair.
// NOTE(review): `PI` is presumably a macro/constant from a project header
// (linefinder.h / edgedetector.h) — confirm; <cmath> does not portably
// provide M_PI.
std::vector<cv::Vec2f> lines;
cv::HoughLines(contours, lines, 1, PI / 180, 10);
cv::Mat result(contours.rows, contours.cols, CV_8U, cv::Scalar(255));
image.copyTo(result);
// BUG(review): another variable-length array (size known only at run time);
// replace with std::vector<float> slope(lines.size());
float slope[lines.size()];
int f = 0;
int e = 0;
int col1 = 0;
int col2 = 0;
// Draw every detected (rho, theta) line across the full result image.
std::vector<cv::Vec2f>::const_iterator it = lines.begin();
while (it != lines.end()) {
float rho = (*it)[0]; // first element is distance rho
float theta = (*it)[1]; // second element is angle theta
// Mostly-vertical line: intersect with the top and bottom image borders.
if (theta < PI / 4. || theta > 3.*PI / 4.) {
cv::Point pt1(rho / cos(theta), 0);
cv::Point pt2((rho - result.rows*sin(theta)) / cos(theta), result.rows);
cv::line(result, pt1, pt2, cv::Scalar(200, 0, 0), 10);
}
// Mostly-horizontal line: intersect with the left and right borders.
else {
cv::Point pt1(0, rho / sin(theta));
cv::Point pt2(result.cols, (rho - result.cols*cos(theta)) / sin(theta));
cv::line(result, pt1, pt2, cv::Scalar(200, 0, 0), 10);
}
++it;
}
// Probabilistic line segments, then filtered by gradient orientation.
std::vector<cv::Vec4i> li = ld.findLines(contours);
li = ld.removeLinesOfInconsistentOrientations(ed.getOrientation(), 0.4, 0.15, image);
// Count (and print) segments whose four coordinates all exceed 30 px.
std::vector<cv::Vec4i>::const_iterator it2 = li.begin();
int li_size = 0;
while (it2 != li.end()) {
if ((*it2)[0] > 30 && (*it2)[1] > 30 && (*it2)[2] >30 && (*it2)[3] > 30){
std::cout << "0 : " << (*it2)[0] << "\n";
std::cout << "1 : " << (*it2)[1] << "\n";
std::cout << "2 : " << (*it2)[2] << "\n";
std::cout << "3 : " << (*it2)[3] << "\n";
li_size++;
}
++it2;
}
// Second pass over the segments, extracting endpoint coordinates.
it2 = li.begin();
b = 0;
while (it2 != li.end()) {
float x1 = (float)(*it2)[0];
float x2 = (float)(*it2)[2];
float y1 = (float)(*it2)[1];
// NOTE(review): the post is truncated here — the rest of main() is missing.
float y2 = (float)(*it2 ...
float futline[linesDetected];
<-- this is a non-portable, GCC-only extension (a variable-length array). Normally, you can only allocate an array with automatic storage when its size is known at compile time. Also, a fixed-size array might overflow, so use std::vector<float>
instead, and allocate it once you have the correct line count from your detection. Then, some general remarks:
Please don't throw a wall of code at us; isolate your problem and make a minimal reproducible example.
Please do not use CvCapture*, or any other OpenCV legacy constructs, in your code.