I don't think you need the octave parameter when you want to compute a SURF descriptor: only the location (Point2f) and the size (float) are required.
The octave is only used when you want to detect keypoints.
SURF descriptors are computed here.
I have tested this method using the following program:
// Load a test image and run the standard SURF pipeline once to obtain a
// reference keypoint and its descriptor.
Mat image1 = imread("F:/lib/opencv/samples/data/lena.jpg", IMREAD_COLOR);
Ptr<Feature2D> bsurf = cv::xfeatures2d::SurfFeatureDetector::create(400);
std::vector<KeyPoint> keypoints_object;
Mat descriptors_object;
bsurf->detect(image1, keypoints_object);
bsurf->compute(image1, keypoints_object, descriptors_object);
cout << "*************** SURF REF ********************\n";
cout << keypoints_object[0].size << "\t" << keypoints_object[0].angle << "\t"
     << keypoints_object[0].response << "\t" << keypoints_object[0].octave;
cout << "\t" << descriptors_object.row(0) << "\n";
cout << "SURF TEST ********************\n";
// One extractor is enough for every iteration — creating a new
// SurfFeatureDetector per loop pass (as the original did) is wasted work.
Ptr<Feature2D> bsurf2 = cv::xfeatures2d::SurfFeatureDetector::create(400);
for (int i = 0; i < 3; i++)
{
    std::vector<KeyPoint> testKey;
    // Hand-built keypoint: only the position and the size are filled in;
    // angle/response/octave/class_id are left at neutral values to show
    // they do not influence the SURF descriptor.
    KeyPoint pTest{ keypoints_object[0].pt, i * 1.5F + keypoints_object[0].size, 0.0F, 0.0F, 0, -1 };
    testKey.push_back(pTest);
    bsurf2->compute(image1, testKey, descriptors_object);
    cout << testKey[0].size << "\t" << testKey[0].angle << "\t" << testKey[0].response
         << "\t" << testKey[0].octave << "\t" << descriptors_object << "\n";
}
When the angle varies, the descriptor changes; the same angle gives the same descriptor even if the octave or response differ.
For SIFT descriptors it is not so easy: you have to know the octave, the layer, and the quantity xi. It is computed here.
The quantity xi is defined here — see section 4 of Lowe's paper.
The program to test this is:
// Run the standard SIFT pipeline once to obtain a reference keypoint and
// descriptor, then rebuild keypoints by hand to show which fields matter.
cout << "*************** SIFT REF********************\n";
keypoints_object.clear();
Ptr<Feature2D> bsift = cv::xfeatures2d::SiftFeatureDetector::create(1);
bsift->detect(image1, keypoints_object);
bsift->compute(image1, keypoints_object, descriptors_object);
cout << keypoints_object[0].size << "\t" << keypoints_object[0].angle << "\t"
     << keypoints_object[0].response << "\t" << keypoints_object[0].octave << "\n";
cout << descriptors_object << "\n";
cout << "SIFT TEST ********************\n";
// xi is the sub-scale interpolation offset from section 4 of Lowe's paper;
// SIFT stores it scaled into the third byte of KeyPoint::octave.
float xi = -0.494F;   // F suffix: avoid narrowing a double literal into a float
// One extractor is enough for every iteration.
Ptr<Feature2D> bsift2 = cv::xfeatures2d::SiftFeatureDetector::create(1);
for (int i = 0; i < 3; i++)
{
    std::vector<KeyPoint> testKey;
    int octave = -1;
    int layer = 3;
    KeyPoint pTest = { keypoints_object[0].pt, keypoints_object[0].size + i * 10, keypoints_object[0].angle, 0, 0, -1 };
    // Pack octave/layer/xi the way OpenCV's SIFT unpacks them:
    //   bits 0-7  : octave index (signed char) — it MUST be masked and OR'ed.
    //               The original used "octave + (layer << 8)": with
    //               octave == -1 that gives 0x2FF, which decodes as
    //               layer 2 instead of the intended layer 3.
    //   bits 8-15 : layer inside the octave
    //   bits 16-23: cvRound((xi + 0.5) * 255)
    pTest.octave = (octave & 255) | (layer << 8) | (cvRound((xi + 0.5F) * 255) << 16);
    testKey.push_back(pTest);
    bsift2->compute(image1, testKey, descriptors_object);
    // Print the TEST keypoint's octave — the original mistakenly printed
    // keypoints_object[0].octave here, hiding the value actually used.
    cout << testKey[0].size << "\t" << testKey[0].angle << "\t" << testKey[0].response
         << "\t" << testKey[0].octave << "\n" << descriptors_object << "\n";
}
I think it is necessary to read Lowe's paper...
Extracting SIFT (or other detectors') parameters for custom keypoints interests me too...