Writing multiple videos from webcam stream

asked 2019-04-17 16:13:27 -0600

updated 2019-04-17 16:18:20 -0600 by mshabunin

Hey everyone, I've been working on my senior project: a motion detector that records video from a webcam connected to a Raspberry Pi. I'm having trouble getting OpenCV to start a new recording once the first motion event has been written out; currently the second instance of motion is written to the same file as the first. Can anyone provide an example of how to fix this, or point out what's wrong in the code below? Thanks in advance.

import cv2, time, pandas, json, argparse, imutils, dropbox, functools, operator, os, subprocess
import datetime
from picamera.array import PiRGBArray
from picamera import PiCamera
from subprocess import Popen, PIPE

#Argument to set the min area of motion detection
#To start python ProgramName.py -a Area  
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--min-area" , type= int, default=500, help="min area size")
args = vars(ap.parse_args())


stillFrameCounter = 0

camera = cv2.VideoCapture(-1)
background = None
motion = [None, None]
time = []
rw = PiRGBArray(camera, size=(640, 480))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
# Output file name, generated once at startup
name = str(datetime.datetime.now()) + '.avi'
at = dropbox.Dropbox('xxxxxxx')
uprev = datetime.datetime.now()
# Writer for the motion clips
capper = cv2.VideoWriter(name, fourcc, 20.0, (640, 480))
dboxup = "/home/pi/Desktop/dropbox_uploader.sh upload "
vup = "/home/pi/Desktop/tempvid/sample.text /" + name

while True:
    print "Running: top of loop."
    check, frame = camera.read()
    motion = 0
    text = "Streaming"
    tw = datetime.datetime.now()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # On the first frame, initialise the background model and skip ahead.
    if background is None:
        background = gray.copy()
        b = gray.copy().astype("float")
        rw.truncate(0)
        continue

    # Diff against the static first-frame background; df (the running-average
    # diff) is also computed, but only 'difference' is thresholded below.
    difference = cv2.absdiff(background, gray)
    cv2.accumulateWeighted(gray, b, 0.5)
    df = cv2.absdiff(gray, cv2.convertScaleAbs(b))

    tf = cv2.threshold(difference, 30, 255, cv2.THRESH_BINARY)[1]
    tf = cv2.dilate(tf, None, iterations=2)

    cnts = cv2.findContours(tf.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    motionDetected = False

    for c in cnts:
        # Ignore contours smaller than the configured minimum area.
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # motion += 1
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        motionDetected = True
        text = "Motion"
        print "Motion Detected."
        stillFrameCounter = 0

    ts = tw.strftime("%A %d %B %Y %I:%M:%S%p")  # timestamp string (currently not drawn on the frame)
    cv2.putText(frame, "Status: {}".format(text), (10, 20), cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 0, 255), 2)

    if motionDetected:
        ret = False
        # Only write a frame if at least 3 seconds have passed since uprev.
        if (tw - uprev).seconds >= 3:
            ret, frame2 = camera.read()
            if ret:
                capper.write(frame2)
    else:
        stillFrameCounter += 1
        if stillFrameCounter > 500:
            print "Releasing capper."
            capper.release()
            # Reopened with the same 'name', so the next motion event goes to the same file.
            capper = cv2.VideoWriter(name, fourcc, 20.0, (640, 480))

    cv2.imshow("Camera Stream", frame)
    # Press 'q' to stop the loop so the cleanup below can run.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

print motion 

camera.release()
capper.release()
cv2.destroyAllWindows()
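
For reference, the writer is reopened with the same `name` that was computed once at startup, so every motion event ends up in the same file. Below is a minimal, untested sketch of one possible fix: generate a fresh timestamped filename each time the writer is (re)created. The `new_writer()` helper is a hypothetical name introduced here for illustration, not an OpenCV API.

import cv2
import datetime

fourcc = cv2.VideoWriter_fourcc(*'DIVX')

def new_writer():
    # Build a new file name from the current time so each event gets its own file.
    fname = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ".avi"
    return fname, cv2.VideoWriter(fname, fourcc, 20.0, (640, 480))

name, capper = new_writer()

# ... later, once the scene has been still long enough to end the event:
# capper.release()              # close the file for the finished motion event
# name, capper = new_writer()   # the next motion event is written to a new file

With per-event filenames, the Dropbox upload command would also need to be rebuilt from the current name rather than the one computed at startup.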

Comments

I can't help you because you're using Python 2.7. I am using Python 3.5, OpenCV 4.1.0, Linux.

supra56 ( 2019-04-18 09:26:13 -0600 )