Need precise cuts for my application

Hello everyone, I am creating an app in Python and I am having difficulty with an algorithm. The app tries to match two photos and make a crop once they have been compared.

It involves passing in two images: the photographer's raw photo as a TIFF, and a JPEG developed from that same TIFF. I am currently having problems with the crop it generates when comparing, and with flipped images. I am using SIFT + good matches, but I don't know how to make it precise: every time it runs on the same image it gives a different crop, and sometimes it fails to detect other images at all.
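For the flipped photos, as far as I know SIFT descriptors are not mirror-invariant, so one idea I have been sketching is to also match a horizontally flipped copy of the JPG and keep whichever orientation gives more good matches. This is only a rough sketch (the helper name and the 0.7 ratio are placeholders I made up, not tested code):

import cv2

def match_both_orientations(gray_query, des_train, sift, flann):
    # Try the query as-is and mirrored; keep the orientation that
    # survives the ratio test best (SIFT is not mirror-invariant).
    best_good, best_kp, best_flipped = [], None, False
    for flipped in (False, True):
        query = cv2.flip(gray_query, 1) if flipped else gray_query
        kp, des = sift.detectAndCompute(query, None)
        matches = flann.knnMatch(des, des_train, k=2)
        good = [m for m, n in matches if m.distance < 0.7 * n.distance]
        if len(good) > len(best_good):
            best_good, best_kp, best_flipped = good, kp, flipped
    return best_good, best_kp, best_flipped

If the flipped version wins, the keypoints are in the mirrored coordinate frame, so the final crop would also have to be flipped back; I have not handled that part yet.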

I compare the user's JPG file with the TIFF, detect the image in the scene, and crop it from the TIFF at the same size as the JPG.

This is more or less the code I have used, slightly edited:

import numpy as np
import cv2
from matplotlib import pyplot as plt

MIN_MATCH_COUNT = 4

img1 = cv2.imread('imagejpguser.jpg')     # queryImage (user's JPG)
img2 = cv2.imread('imageforcompare.jpg')  # trainImage (image to crop from)

# Convert to grayscale for feature detection
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create(contrastThreshold=0, edgeThreshold=300)  # cv2.SIFT_create in newer OpenCV builds

kp1, des1 = sift.detectAndCompute(gray1,None)
kp2, des2 = sift.detectAndCompute(gray2,None)

FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 10)
search_params = dict(checks = 50)

flann = cv2.FlannBasedMatcher(index_params, search_params)

matches = flann.knnMatch(des1,des2,k=2)

good = []
for m,n in matches:
    if m.distance < 0.45*n.distance:
        good.append(m)

if len(good) > MIN_MATCH_COUNT:
    canvas = img2.copy()
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)

    h, w = img1.shape[:2]
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)

    cv2.polylines(canvas,[np.int32(dst)],True,(0,0,255),3, cv2.LINE_AA)


    matched = cv2.drawMatches(img1, kp1, canvas, kp2, good, None)

    # Warp the detected region back to an upright crop the size of the JPG
    perspectiveM = cv2.getPerspectiveTransform(np.float32(dst), pts)
    found = cv2.warpPerspective(img2, perspectiveM, (w, h), borderMode=cv2.BORDER_REPLICATE)

    # Save the cropped image and the match visualization
    cv2.imwrite("final.jpg", found, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
    cv2.imwrite("matched.jpg", matched)

else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
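I also wonder if part of the inconsistency is that MIN_MATCH_COUNT = 4 is the bare minimum findHomography needs, so a handful of bad matches can still produce a (wrong) homography. One idea I want to try is to only accept the crop when enough RANSAC inliers survive, using the mask that findHomography returns. Just a sketch, MIN_INLIERS is an arbitrary number I picked:

import cv2

MIN_INLIERS = 10   # arbitrary threshold, would need tuning

def robust_homography(src_pts, dst_pts):
    # Estimate the homography, but reject it unless enough
    # RANSAC inliers (mask == 1) actually support it.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    inliers = int(mask.ravel().sum()) if mask is not None else 0
    if M is None or inliers < MIN_INLIERS:
        return None, inliers
    return M, inliers

Is gating on the inlier count like this the right direction to make the crop repeatable, or is there a better approach?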