Ask Your Question

krishnaprasad's profile - activity

2020-02-12 06:11:23 -0600 received badge  Popular Question (source)
2014-03-23 05:08:32 -0600 asked a question key frame extraction

Iam trying to do key frame extraction.For that iam using surf algorithm.While running the code an error is being shown over.Iam not getting why that error is being occured.Can you kindly please find a solution for that error ?

the error is :

OpenCV Error: Assertion failed (queryDescriptors.type() == trainDescCollection[0].type()) in knnMatchImpl, file /home/user/OpenCV/opencv-2.4.6.1/modules/features2d/src/matchers.cpp, line 351 Traceback (most recent call last): File "sunday.py", line 170, in <module> kp_pairs = match_images(img1, img2) File "sunday.py", line 24, in match_images raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) cv2.error: /home/user/OpenCV/opencv-2.4.6.1/modules/features2d/src/matchers.cpp:351: error: (-215) queryDescriptors.type() == trainDescCollection[0].type() in function knnMatchImpl

the code which i used is :

import numpy import cv2 import os import sys

global stat

count1=1 count=1

#

Image Matching

#

def match_images(img1, img2): """Given two images, returns the matches""" detector = cv2.SURF(300,3,4) matcher = cv2.BFMatcher(cv2.NORM_L2)

kp1, desc1 = detector.detectAndCompute(img1, None)
kp2, desc2 = detector.detectAndCompute(img2, None)

# print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2)) #t1=len(kp1) #t2=len(kp2) raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) kp_pairs = filter_matches(kp1, kp2, raw_matches) return kp_pairs

def filter_matches(kp1, kp2, matches, ratio = 0.75): mkp1, mkp2 = [], [] for m in matches: if len(m) == 2 and m[0].distance < m[1].distance * ratio: m = m[0] mkp1.append( kp1[m.queryIdx] ) mkp2.append( kp2[m.trainIdx] ) kp_pairs = zip(mkp1, mkp2) return kp_pairs

#

Match Diplaying

#

'''def explore_match(win, img1, img2, kp_pairs, status = None, H = None): counts=1 h1, w1 = img1.shape[:2] h2, w2 = img2.shape[:2] vis = numpy.zeros((max(h1, h2), w1+w2), numpy.uint8) vis[:h1, :w1] = img1 vis[:h2, w1:w1+w2] = img2 vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)

if H is not None:
    corners = numpy.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
    corners = numpy.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
    cv2.polylines(vis, [corners], True, (55, 55, 55))

if status is None:
    status = numpy.ones(len(kp_pairs), numpy.bool_)
p1 = numpy.int32([kpp[0].pt for kpp in kp_pairs])
p2 = numpy.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)

green = (0, 55, 0)
red = (0, 0, 55)
white = (55, 55, 55)
kp_color = (25, 50, 136)
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
    if inlier:
        col = green
        cv2.circle(vis, (x1, y1), 2, col, -1)
        cv2.circle(vis, (x2, y2), 2, col, -1)
    else:
        col = red
        r = 2
        thickness = 3
        cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
        cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
        cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
        cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
vis0 = vis.copy()
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
    if inlier:
        cv2.line(vis, (x1 ...
(more)