Ask Your Question

Revision history [back]

click to hide/show revision 1
initial version

Align two images with only translation and rotation without scaling

Is there any way to constrain the transformation while estimating it? I want the transformation to involve no scaling, because I know the exact size of those images — they are scanned images, which means no scaling should be involved.

here is my code:

if __name__ == "__main__":
    # Matcher configuration. full_affine=False requests a partial affine
    # model (per OpenCV docs: rotation, translation and uniform scale)
    # instead of a full 6-DOF affine.
    full_affine = False
    try_cuda = True
    match_conf = 0.3

    # ORB keypoint detector / descriptor with default parameters.
    finder = cv2.ORB.create()
    matcher = cv2.detail_AffineBestOf2NearestMatcher(full_affine, try_cuda, match_conf)

    # NOTE(review): cv2.imread returns None on failure — consider checking
    # before feature extraction.
    source_img = cv2.imread("000001.jpg")
    target_img = cv2.imread("000000.jpg")

    # Detect keypoints and compute descriptors for both images.
    source_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=source_img)
    target_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=target_img)

    # NOTE(review): load_tile_info and img_path are defined elsewhere (not
    # shown in this snippet); results are unused below.
    source_info = load_tile_info("000001.xml", img_path, 1)
    target_info = load_tile_info("000000.xml", img_path, 0)

    # Estimate the pairwise transform between the two feature sets.
    matching_result = matcher.apply(source_feature, target_feature)

    print("matching_result.confidence")
    print(matching_result.confidence)
    print("matching_result.H")
    print(matching_result.H)
    print("matching_result.dst_img_idx")
    print(matching_result.dst_img_idx)
    print("matching_result.src_img_idx")
    print(matching_result.src_img_idx)

Align two images with only translation and rotation without scaling

Is there any way to constrain the transformation while estimating it? I want the transformation to involve no scaling, because I know the exact size of those images — they are scanned images, which means no scaling should be involved.

here is my code:

if __name__ == "__main__":
    # Matcher configuration. full_affine=False requests a partial affine
    # model (per OpenCV docs: rotation, translation and uniform scale)
    # instead of a full 6-DOF affine.
    full_affine = False
    try_cuda = True
    match_conf = 0.3

    # ORB keypoint detector / descriptor with default parameters.
    finder = cv2.ORB.create()
    matcher = cv2.detail_AffineBestOf2NearestMatcher(full_affine, try_cuda, match_conf)

    # NOTE(review): cv2.imread returns None on failure — consider checking
    # before feature extraction.
    source_img = cv2.imread("000001.jpg")
    target_img = cv2.imread("000000.jpg")

    # Detect keypoints and compute descriptors for both images.
    source_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=source_img)
    target_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=target_img)

    # NOTE(review): load_tile_info and img_path are defined elsewhere (not
    # shown in this snippet); results are unused below.
    source_info = load_tile_info("000001.xml", img_path, 1)
    target_info = load_tile_info("000000.xml", img_path, 0)

    # Estimate the pairwise transform between the two feature sets.
    matching_result = matcher.apply(source_feature, target_feature)

    print("matching_result.confidence")
    print(matching_result.confidence)
    print("matching_result.H")
    print(matching_result.H)
    print("matching_result.dst_img_idx")
    print(matching_result.dst_img_idx)
    print("matching_result.src_img_idx")
    print(matching_result.src_img_idx)

I found an answer years ago. https://answers.opencv.org/question/90428/scale-variant-feature-matching/

eduardo (https://answers.opencv.org/users/16447/eduardo/) provided an answer suggesting that `nlevels` be set to 1.

However the output is still a transform with noticeable scaling, which leads to a wrong alignment.

Align two images with only translation and rotation without scaling

Is there any way to constrain the transformation while estimating it? I want the transformation to involve no scaling, because I know the exact size of those images — they are scanned images, which means no scaling should be involved.

here is my code:

if __name__ == "__main__":
    # Matcher configuration. full_affine=False requests a partial affine
    # model (per OpenCV docs: rotation, translation and uniform scale)
    # instead of a full 6-DOF affine.
    full_affine = False
    try_cuda = True
    match_conf = 0.3

    # Revision change: use a single-level ORB pyramid (nlevels=1) so that
    # features are detected at the original scale only — an attempt to keep
    # the estimated transform scale-free.
    # finder = cv2.ORB.create()
    finder = cv2.ORB_create(scaleFactor=1.2, nlevels=1, edgeThreshold=31,
                            firstLevel=0, WTA_K=2, scoreType=cv2.ORB_HARRIS_SCORE,
                            nfeatures=100, patchSize=31)
    matcher = cv2.detail_AffineBestOf2NearestMatcher(full_affine, try_cuda, match_conf)

    # NOTE(review): cv2.imread returns None on failure — consider checking
    # before feature extraction.
    source_img = cv2.imread("000001.jpg")
    target_img = cv2.imread("000000.jpg")

    # Detect keypoints and compute descriptors for both images.
    source_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=source_img)
    target_feature = cv2.detail.computeImageFeatures2(featuresFinder=finder, image=target_img)

    # NOTE(review): load_tile_info and img_path are defined elsewhere (not
    # shown in this snippet); results are unused below.
    source_info = load_tile_info("000001.xml", img_path, 1)
    target_info = load_tile_info("000000.xml", img_path, 0)

    # Estimate the pairwise transform between the two feature sets.
    matching_result = matcher.apply(source_feature, target_feature)

    print("matching_result.confidence")
    print(matching_result.confidence)
    print("matching_result.H")
    print(matching_result.H)
    print("matching_result.dst_img_idx")
    print(matching_result.dst_img_idx)
    print("matching_result.src_img_idx")
    print(matching_result.src_img_idx)

I found an answer years ago. https://answers.opencv.org/question/90428/scale-variant-feature-matching/

eduardo (https://answers.opencv.org/users/16447/eduardo/) provided an answer suggesting that `nlevels` be set to 1.

However the output is still a transform with noticeable scaling, which leads to a wrong alignment.