Hi!
I have an even older GPU than the one you mentioned (mine is a GTX 970), and it works perfectly well for me with OpenCV 4.1.1.
I compiled darknet with CUDA 10.0 and cuDNN 7.4 (the build for CUDA 10.0), and specifically for the darknet compilation I used OpenCV 3.3, following the recommendation at [https://github.com/AlexeyAB/darknet].
I trained my own YOLOv3 model based on yolov3-tiny and used it in the following Python code (you can just use the standard YOLO models instead):
import cv2 as cv
import numpy as np
import time

classFile = "obj.names"  # my own class names, or just use coco.names
with open(classFile, 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')

modelConf = 'yolov3-tiny_obj.cfg'              # or just use yolov3.cfg
modelWeights = 'yolov3-tiny_obj_7000.weights'  # or just use yolov3.weights

net = cv.dnn.readNetFromDarknet(modelConf, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL_FP16)  # run inference on the GPU via OpenCL in FP16

inputFile = "input.mp4"          # placeholder: path to your video file or stream
inpWidth, inpHeight = 416, 416   # network input size; must match your .cfg

winName = "YOLOv3 + OpenCV"
cv.namedWindow(winName, cv.WINDOW_NORMAL)
cv.resizeWindow(winName, 1280, 720)

cap = cv.VideoCapture(inputFile)
while True:
    _, frame = cap.read()
    if np.shape(frame) != ():
        blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)
        net.setInput(blob)
        outs = net.forward(getOutputsNames(net))  # forward pass over the YOLO output layers
        frameExtract(frame, outs)  # standard frame extraction (drawing the detections); skipped here to keep it short, see the sketch below
        cv.imshow(winName, frame)
        k = cv.waitKey(1) & 0xFF
    else:
        print("Reinitialize capture device ", time.ctime())
        cap = cv.VideoCapture(inputFile)
        time.sleep(1)
        k = cv.waitKey(1) & 0xFF
    if k == 27:
        cv.destroyAllWindows()
        break
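The two helpers getOutputsNames() and frameExtract() that I skipped are essentially the ones from the standard OpenCV DNN object-detection sample; a minimal sketch looks roughly like this (the 0.5 confidence and 0.4 NMS thresholds are just example values you should tune for your own model):

def getOutputsNames(net):
    # names of the YOLO output layers, i.e. the layers with unconnected outputs
    layersNames = net.getLayerNames()
    return [layersNames[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

def frameExtract(frame, outs, confThreshold=0.5, nmsThreshold=0.4):
    # each detection row is [center_x, center_y, w, h, objectness, class scores...], all normalized to [0, 1]
    frameHeight, frameWidth = frame.shape[:2]
    classIds, confidences, boxes = [], [], []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            classId = int(np.argmax(scores))
            confidence = float(scores[classId])
            if confidence > confThreshold:
                width = int(detection[2] * frameWidth)
                height = int(detection[3] * frameHeight)
                left = int(detection[0] * frameWidth - width / 2)
                top = int(detection[1] * frameHeight - height / 2)
                classIds.append(classId)
                confidences.append(confidence)
                boxes.append([left, top, width, height])
    # non-maximum suppression to drop overlapping boxes
    indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
    for i in np.array(indices).flatten():
        left, top, width, height = boxes[i]
        label = '%s: %.2f' % (classes[classIds[i]], confidences[i])
        cv.rectangle(frame, (left, top), (left + width, top + height), (0, 255, 0), 2)
        cv.putText(frame, label, (left, max(top - 5, 15)),
                   cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)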