I am trying to do key frame extraction, and for that I am using the SURF algorithm. While running the code, the error shown below occurs, and I am not sure why. Could you please help me find a solution for this error?
The error is:
OpenCV Error: Assertion failed (queryDescriptors.type() == trainDescCollection[0].type()) in knnMatchImpl, file /home/user/OpenCV/opencv-2.4.6.1/modules/features2d/src/matchers.cpp, line 351
Traceback (most recent call last):
  File "sunday.py", line 170, in <module>
    kp_pairs = match_images(img1, img2)
  File "sunday.py", line 24, in match_images
    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
cv2.error: /home/user/OpenCV/opencv-2.4.6.1/modules/features2d/src/matchers.cpp:351: error: (-215) queryDescriptors.type() == trainDescCollection[0].type() in function knnMatchImpl
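From what I can tell, the assertion compares the type of the query descriptor matrix with the type of the train descriptor matrix, so it looks like one of the two descriptor sets returned by detectAndCompute is empty (None) or has a different dtype, for example when a frame fails to load or has no SURF features. This is only a diagnostic sketch I could place just before the knnMatch call in my match_images function (it is not part of my original code), using the same desc1/desc2 names:

    # diagnostic only: report the descriptor types before matching
    print 'desc1:', (None if desc1 is None else desc1.dtype)
    print 'desc2:', (None if desc2 is None else desc2.dtype)
    if desc1 is None or desc2 is None:
        return []  # nothing to match for this pair of frames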
The code I used is:
import numpy
import cv2
import os
import sys

global stat

count1 = 1
count = 1
#
# Image Matching
#
def match_images(img1, img2):
    """Given two images, returns the matches"""
    detector = cv2.SURF(300, 3, 4)
    matcher = cv2.BFMatcher(cv2.NORM_L2)

    kp1, desc1 = detector.detectAndCompute(img1, None)
    kp2, desc2 = detector.detectAndCompute(img2, None)
    # print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
    # t1 = len(kp1)
    # t2 = len(kp2)
    raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2)
    kp_pairs = filter_matches(kp1, kp2, raw_matches)
    return kp_pairs

def filter_matches(kp1, kp2, matches, ratio = 0.75):
    mkp1, mkp2 = [], []
    for m in matches:
        if len(m) == 2 and m[0].distance < m[1].distance * ratio:
            m = m[0]
            mkp1.append(kp1[m.queryIdx])
            mkp2.append(kp2[m.trainIdx])
    kp_pairs = zip(mkp1, mkp2)
    return kp_pairs
#
# Match Displaying
#
'''def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
    counts = 1
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    vis = numpy.zeros((max(h1, h2), w1 + w2), numpy.uint8)
    vis[:h1, :w1] = img1
    vis[:h2, w1:w1 + w2] = img2
    vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

    if H is not None:
        corners = numpy.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
        corners = numpy.int32(cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0))
        cv2.polylines(vis, [corners], True, (55, 55, 55))

    if status is None:
        status = numpy.ones(len(kp_pairs), numpy.bool_)
    p1 = numpy.int32([kpp[0].pt for kpp in kp_pairs])
    p2 = numpy.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)

    green = (0, 55, 0)
    red = (0, 0, 55)
    white = (55, 55, 55)
    kp_color = (25, 50, 136)
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            col = green
            cv2.circle(vis, (x1, y1), 2, col, -1)
            cv2.circle(vis, (x2, y2), 2, col, -1)
        else:
            col = red
            r = 2
            thickness = 3
            cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), col, thickness)
            cv2.line(vis, (x1 - r, y1 + r), (x1 + r, y1 - r), col, thickness)
            cv2.line(vis, (x2 - r, y2 - r), (x2 + r, y2 + r), col, thickness)
            cv2.line(vis, (x2 - r, y2 + r), (x2 + r, y2 - r), col, thickness)
    vis0 = vis.copy()
    for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
        if inlier:
            cv2.line(vis, (x1, y1), (x2, y2), green)

    cv2.imshow(win, vis)
    cv2.imwrite("/home/user/mini/vector/frame%d.jpg" % counts, vis)
    counts = counts + 1'''
def draw_matches(window_name, kp_pairs, img1, img2):
    global m
    """Draws the matches for """
    mkp1, mkp2 = zip(*kp_pairs)

    p1 = numpy.float32([kp.pt for kp in mkp1])
    p2 = numpy.float32([kp.pt for kp in mkp2])

    if len(kp_pairs) >= 4:
        H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
        stat = numpy.sum(status)
        print ' matches', numpy.sum(status)
    else:
        H, status = None, None
        # print '%d matches found, not enough for homography estimation' % len(p1)

    # if (mat <= 10):
    #     explore_match(window_name, img1, img2, kp_pairs, status, H)
    # global stat
    m = stat
#
# Test Main
#
if __name__ == '__main__':
    m = 0
    try:
        vidFile = cv2.VideoCapture(sys.argv[1])
    except:
        print "problem opening input stream"
        sys.exit(1)
    if not vidFile.isOpened():
        print "capture stream not open"
        sys.exit(1)
    # OPENING THE VIDEO
    nFrames = int(vidFile.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))  # one good way of namespacing legacy openCV: cv2.cv.*
    print "frame number: %s" % nFrames
    fps = vidFile.get(cv2.cv.CV_CAP_PROP_FPS)
    print "FPS value: %s" % fps
    '''ret, frame = vidFile.read()  # read first frame, and the return code of the function.
    while ret:  # note that we don't have to use frame number here, we could read from a live written file.
        # print "yes"
        cv2.imshow("frameWindow", frame)
        cv2.waitKey(int(1 / fps * 1000))  # time to wait between frames, in mSec
        ret, frame = vidFile.read()  # read next frame, get next return code'''
    # CONVERTING INTO IMAGE SEQUENCE
    vidFile = cv2.VideoCapture(sys.argv[1])
    suc, image = vidFile.read()
    count = 1
    while suc:
        suc, image = vidFile.read()
        # if (count >= 50 and count <= 1246):
        cv2.imwrite("/home/user/trial/conv/%d.jpg" % count, image)
        # if cv2.waitKey(10) == 27:
        #     break
        count = count + 1

    # imageformat = ".jpg"
    # path1 = "/home/user/mini/vid"
    # imfilelist1 = [os.path.join(path1, f1) for f1 in os.listdir(path1) if f1.endswith(imageformat)]
    # imfilelist1.sort()
    for i in range(1, nFrames):
        for j in range(i + 1, nFrames):
            fn1 = "/home/user/trial/conv/%d.jpg" % (i)
            fn2 = "/home/user/trial/conv/%d.jpg" % (j)
            img1 = cv2.imread(fn1, 1)
            img2 = cv2.imread(fn2, 1)
            kp_pairs = match_images(img1, img2)
            # global m
            if kp_pairs:
                draw_matches('find_obj', kp_pairs, img1, img2)
                cv2.waitKey(100)
                cv2.destroyAllWindows()
                # global m
                if (j == nFrames - 1):
                    print "Completed inner"
                    break
                if (m <= 10):
                    cv2.imwrite("/home/user/trial/mt/%d.jpg" % count1, img1)
                    count1 = count1 + 1
                    i = j - 1
                    if (j == nFrames - 1):
                        print "Completed"
                        break
            # if img1 is None:
            #     print 'Failed to load fn1:', fn1
            #     sys.exit(1)
            # if img2 is None:
            #     print 'Failed to load fn2:', fn2
            #     sys.exit(1)
            else:
                # print "No matches found"
                pass