Hi there,
I'm building a physical media controller with an Arduino and Python/OpenCV. My goal is to select the frame of a video file using a sensor hooked up to the Arduino. Right now I am using serial communication to read the sensor value coming from the Arduino, and that part works in "realtime". However, as soon as I use videocapture.set(1, frame_number) the whole script becomes enormously laggy (think more than 10 seconds of delay) and sometimes it just stops. (A stripped-down repro follows after the full script below.)
I am using the following code:
# the needed libraries/packages
from imutils.video import WebcamVideoStream
from imutils import face_utils
from time import sleep
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
import serial
import keyboard
arduinoSerial = serial.Serial('COM4',115200)
frame_no = 60
crosshairSize = 10
ap = argparse.ArgumentParser()
ap.add_argument("-s","--shape-predictor",default="./shape_predictor_5_face_landmarks.dat",help="path to facial landmark predictor")
ap.add_argument("-p", "--prototxt", default="./deploy.prototxt.txt",help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", default="./res10_300x300_ssd_iter_140000.caffemodel", help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.5, help="minimum probability to filter weak detections")
ap.add_argument("-b","--blurtreshold",type=float,default=100.0,help="The sharpnes treshold")
ap.add_argument("-t", "--sizetreshold",type=float,default=0.4,help="The amount of pixels the face takes in percentage of total video resolution")
#ap.add_argument("-f","--filecap",help="The sharpnes treshold")
args = vars(ap.parse_args())
print("[INFO] loading models...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"]) #find faces using Deep Neural Net
predictor = dlib.shape_predictor(args["shape_predictor"]) #Find face features using dlib
cap = cv2.VideoCapture('./luke_1.avi') # the video file whose frames get selected
while True:
    # latest sensor value from the Arduino (one byte, 0-255)
    arduinoData = arduinoSerial.read()
    arduinoData = int.from_bytes(arduinoData, byteorder='little')
    print(arduinoData)
    frame_no = arduinoData
    cap.set(1, frame_no) # 1 == cv2.CAP_PROP_POS_FRAMES, seek to the selected frame
    ret, frame = cap.read() # read() = grab() + retrieve(); retrieve() alone needs a prior grab()
    if not ret:
        continue
    frame = imutils.resize(frame, width=800)
    cv2.imshow('window_name', frame) # show frame in the window
    ch = 0xFF & cv2.waitKey(1)
    if ch == 27: # Esc to quit
        break
cap.release()
cv2.destroyAllWindows()
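For anyone who wants to try this without the dlib/Caffe models, here is a stripped-down sketch of just the serial-to-seek loop. The single-byte sensor value (0-255), the COM4 port and the file name are simply taken from my setup above; the buffer draining is something I added for this sketch so the display follows the most recent reading, it is not in the full script.

# minimal sketch: serial value -> frame seek -> display, no face detection
import cv2
import serial

arduinoSerial = serial.Serial('COM4', 115200)  # same port/baud as above
cap = cv2.VideoCapture('./luke_1.avi')

while True:
    # keep only the most recent byte; older readings in the buffer are stale
    while arduinoSerial.in_waiting > 1:
        arduinoSerial.read()
    frame_no = int.from_bytes(arduinoSerial.read(), byteorder='little')

    cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)  # the call that causes the lag
    ret, frame = cap.read()
    if not ret:
        continue

    cv2.imshow('seek_test', frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()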
Could anyone guide me on how to do this efficiently? To summarize, my goal is to display a frame of a video, selected dynamically by a value pushed over serial to the Python script, at a rate of at least 20 fps.
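To make the 20 fps target concrete: that leaves a budget of roughly 50 ms per loop iteration (serial read, seek, decode, display). A quick way to see which call eats that budget is to time the seek and the decode separately, for example with time.perf_counter (frame_no = 60 is just an arbitrary test value):

# rough per-call timing against the ~50 ms/frame budget for 20 fps
import time
import cv2

cap = cv2.VideoCapture('./luke_1.avi')
frame_no = 60  # arbitrary test value

t0 = time.perf_counter()
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)  # the seek
t1 = time.perf_counter()
ret, frame = cap.read()                     # the decode
t2 = time.perf_counter()

print('seek  : %.1f ms' % ((t1 - t0) * 1000))
print('decode: %.1f ms' % ((t2 - t1) * 1000))
cap.release()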
Any help would be much appreciated!