I am working on an image stitching project where I use point pairs, obtained by tracking points between frames of a video with the Lucas-Kanade algorithm, to compute homography matrices. After writing the program, when it came time to stitch the frames of a video together, I ran a test that simply displays the perspective-warped version of each frame on a black canvas, to see how the homography matrix had warped it. Instead of each frame being nudged over a bit from the previous one, the frames were translated further and further, way off from a slight shift between frames:
[----------------------------------------------------------------------------Empty Space---------------------------------------]
[Frame 0--------------------------------------------------------------------------------------------------------------------------]
[------------Frame 1---------------------------------------------------------------------------------------------------------------]
[-------------------------------------------Frame 2--------------------------------------------------------------------------------]
[---------------------------------------------------------------------------------------------------------------Frame 3-----------]
Subsequent frames would be out of visual range entirely. I am not quite sure why this is happening. I implemented a back-projection error check to make sure only points with accurate optical-flow calculations were passed on, and I set the RANSAC reprojection threshold for findHomography to 10, then 1, then 0.5, all to no avail. Since I am stitching multiple images, I am multiplying my homography matrices between frames, and this seems to be compounding the error. Why is this happening, and how can I fix my homography matrices?
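To make the expected behavior concrete, here is a quick sketch (made-up numbers, nothing from my actual pipeline) of what I understand the composition of two near-translation homographies should do, namely just add the nudges up:

import numpy as np

# Two hypothetical pairwise homographies: pure 5 px and 7 px horizontal nudges.
H01 = np.array([[1.0, 0.0, 5.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0]])
H12 = np.array([[1.0, 0.0, 7.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0]])

print(np.dot(H01, H12)[0, 2])    # 12.0 -- matrix product: the translations add
print((H01 * H12)[0, 2])         # 35.0 -- element-wise `*` is not a composition

So if the pairwise matrices are right, the chained warp for frame n should only ever be about n nudges away from frame 0, which is clearly not what I am seeing. Here is my code (ignore the commented-out tests):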
import numpy as np
import sys
import cv2
import math

lastFeatures = None
currentFeatures = None
opticFlow = None
panRow = None
Rows = None
finalPanorama = None

def loadRow(dirPath, fType, numImages, column):
    imageRow = []
    for i in range(0, numImages):
        imageRow.append(cv2.imread("%s/%i_%i.%s" % (dirPath, column, i, fType), cv2.IMREAD_COLOR))
    return imageRow
def findNthFeatures(prevImg, prevPnts, nxtImg):
    back_threshold = 0.5
    nxtDescriptors = []
    prevGrey = None
    nxtGrey = None
    nxtPnts = prevPnts[:]
    prevGrey = cv2.cvtColor(prevImg, cv2.COLOR_BGR2GRAY)
    nxtGrey = cv2.cvtColor(nxtImg, cv2.COLOR_BGR2GRAY)
    lucasKanadeParams = dict(winSize = (19, 19), maxLevel = 100,
                             criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Track forward, then track the results back again, and keep only points
    # whose round trip lands within back_threshold pixels of where they started.
    nxtPnts, status, err = cv2.calcOpticalFlowPyrLK(prevGrey, nxtGrey, prevPnts, None, **lucasKanadeParams)
    backProjections, status, err = cv2.calcOpticalFlowPyrLK(nxtGrey, prevGrey, nxtPnts, None, **lucasKanadeParams)
    d = abs(prevPnts - backProjections).reshape(-1, 2).max(-1)
    status = d < back_threshold
    goodNew = nxtPnts[status].copy()
    goodLast = prevPnts[status].copy()
    return goodLast, goodNew
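
# A quick standalone sanity check of the round-trip filter above, in case it
# helps (the video file name is just a placeholder for my actual input):
def checkBackProjection(videoName = "pan_video.mp4"):
    cap = cv2.VideoCapture(videoName)
    ok, frameA = cap.read()
    ok, frameB = cap.read()
    grayA = cv2.cvtColor(frameA, cv2.COLOR_BGR2GRAY)
    pts = cv2.goodFeaturesToTrack(grayA, maxCorners = 100, qualityLevel = 0.1,
                                  minDistance = 8, blockSize = 15)
    goodLast, goodNew = findNthFeatures(frameA, pts, frameB)
    print("%d of %d points passed the back-projection check" % (len(goodNew), len(pts)))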
def getHomographies(videoName):
    color = np.random.randint(0, 255, (100, 3))
    lastFrame = None
    currentFrame = None
    lastKeypoints = None
    currentKeypoints = None
    firstImage = True
    featureRefreshRate = 5
    feature_params = dict(maxCorners = 100,
                          qualityLevel = 0.1,
                          minDistance = 8,
                          blockSize = 15)
    frameCount = 0
    Homographies = []
    cv2.namedWindow('display', cv2.WINDOW_NORMAL)
    cap = cv2.VideoCapture(videoName)
    flags, frame = cap.read()
    while flags:
        if firstImage:
            # Seed the tracker with Shi-Tomasi corners from the first frame.
            firstImage = False
            lastFrame = frame[:,:].copy()
            lastGray = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
            lastKeypoints = cv2.goodFeaturesToTrack(lastGray, mask = None, **feature_params)
            flags, frame = cap.read()
            frameCount += 1
        else:
            mask = np.zeros_like(lastFrame)
            currentFrame = frame[:,:].copy()
            frameCount += 1
            lastKeypoints, currentKeypoints = findNthFeatures(lastFrame, lastKeypoints, currentFrame)
            # for i, (new, old) in enumerate(zip(currentKeypoints, lastKeypoints)):
            #     a, b = new.ravel()
            #     c, d = old.ravel()
            #     mask = cv2.line(mask, (a,b), (c,d), color[i].tolist(), 2)
            #     frame = cv2.circle(frame, (a,b), 5, color[i].tolist(), -1)
            # img = cv2.add(frame, mask)
            # cv2.imshow('display', img)
            # cv2.waitKey(0)
            # Homography mapping the current frame's points onto the last frame's.
            homographyMatrix, homographyStatus = cv2.findHomography(currentKeypoints, lastKeypoints, cv2.RANSAC, 0.5)
            Homographies.append(homographyMatrix)
            lastFrame = currentFrame
            lastKeypoints = currentKeypoints
            # Re-detect fresh corners every few frames so the tracks don't die out.
            if frameCount % featureRefreshRate == 0:
                grayBuf = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
                lastKeypoints = cv2.goodFeaturesToTrack(grayBuf, mask = None, **feature_params)
            flags, frame = cap.read()
    return Homographies
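
# For reference, my understanding of how the chaining should work:
# Homographies[i] maps frame i+1 coordinates into frame i coordinates, so
# mapping frame n all the way back to frame 0 should be the matrix product
# H0 . H1 . ... . H(n-1), accumulated left to right. A sketch of how I would
# expect the cumulative matrices to be built (np.dot, a true matrix product):
def accumulateHomographies(pairwise):
    cumulative = []
    total = np.eye(3)
    for H in pairwise:
        total = np.dot(total, H)
        cumulative.append(total)
    return cumulative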
def stitchRow(videoName):
    cv2.namedWindow('display', cv2.WINDOW_NORMAL)
    frameCount = 0
    cap = cv2.VideoCapture(videoName)
    ret, initialImage = cap.read()
    homographyMatrices = getHomographies(videoName)
    warpHMat = homographyMatrices[frameCount]
    while ret:
        ret, nextImg = cap.read()
        if not ret:    # guard against the empty read at the end of the video
            break
        frameCount += 1
        # Warp the new frame onto a canvas wide enough to hold both images.
        result = cv2.warpPerspective(nextImg, warpHMat, (initialImage.shape[1] + nextImg.shape[1], nextImg.shape[0]))
        # result[0:initialImage.shape[0], 0:initialImage.shape[1]] = initialImage
        cv2.imshow('display', result)
        cv2.waitKey(0)
        # cv2.imshow('display', initialImage)
        # cv2.waitKey(0)
        if frameCount >= len(homographyMatrices):    # no homography for the final frame
            break
        # Accumulate the chain of homographies back to frame 0 -- this is the
        # part that seems to compound the error.
        warpHMat = homographyMatrices[frameCount]
        for j in range(frameCount, 0, -1):
            warpHMat = warpHMat * homographyMatrices[j-1]
        # initialImage = result[:, :].copy()

stitchRow(sys.argv[1])
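In case it matters, I run it as python stitcher.py pan_video.mp4 (both names are just placeholders for my actual files); the camera motion between consecutive frames is only a slight nudge, so none of the pairwise homographies should be far from the identity.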