How can I get more accurate image detection and crops?

asked 2019-09-18 09:43:35 -0600 by frank95

Hi all, I'm coding an app for automatic image processing. I need to find a template image inside another image by feature matching and then crop the matched region out of it. At the moment I'm using SIFT with a ratio test, but sometimes I get bad keypoints and therefore bad crops. I've uploaded some examples where you can see the problem.

How can I solve this? I've followed a few tutorials on the internet without any success. I want to make this algorithm accurate enough to handle any input image.

Example files: https://imgur.com/a/F22vUMw

This is my current code:

import cv2
import numpy as np

imguser = cv2.imread("imageuser.jpg")
imgcom = cv2.imread("imagecompare.jpg")

gray1 = cv2.cvtColor(imguser, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(imgcom, cv2.COLOR_BGR2GRAY)

# Create the SIFT detector (thresholds chosen to keep as many keypoints as possible)
sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=0, edgeThreshold=300)

# FLANN index parameters (algorithm 1 = FLANN_INDEX_KDTREE)
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=10)
search_params = dict(checks=100)

#Create flann matcher
matcher = cv2.FlannBasedMatcher(index_params, search_params)

#Detect keypoints and compute their descriptors on the grayscale images
kpts1, descs1 = sift.detectAndCompute(gray1, None)
kpts2, descs2 = sift.detectAndCompute(gray2, None)

#knnMatch to get the top 2 matches for each descriptor
matches = matcher.knnMatch(descs1, descs2, 2)

#Sort by their distance.
matches = sorted(matches, key = lambda x:x[0].distance)

#Ratio test, to get good matches.
good = []
for m,n in matches:
    if m.distance < 0.75 * n.distance:
        good.append(m)
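# (Note to self: cv2.findHomography below needs at least 4 point pairs,
#  so it might be worth checking len(good) >= 4 here before going on.)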

#queryIdx indexes the small object image, trainIdx indexes the scene
src_pts = np.float32([ kpts1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kpts2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

#Find the homography matrix with cv2.RANSAC using the good match points
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 4.0)

print(dst_pts)
h,w = imguser.shape[:2]
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)

#Draw the projected corners of the user image onto a copy of the scene
canvas = imgcom.copy()
cv2.polylines(canvas,[np.int32(dst)],True,(0,0,255),3, cv2.LINE_AA)

#DrawMatches
matched = cv2.drawMatches(imguser,kpts1,canvas,kpts2,good,None)
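# ('matched' is only for visual debugging at the moment; I am not saving it anywhere,
#  but cv2.imwrite("matches.jpg", matched) would let me inspect which keypoints were used.)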

#Crop the matched region from scene
h,w = imguser.shape[:2]
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
perspectiveM = cv2.getPerspectiveTransform(np.float32(dst),pts)
found = cv2.warpPerspective(imgcom,perspectiveM,(w,h),borderMode=cv2.BORDER_REPLICATE)

#Save the cropped image
cv2.imwrite("final.jpg", found)