This is code for face recognition and attendance marking. When I run it, I get the following error:
Traceback (most recent call last):
File "C:\Users\ACER\Desktop\PROJECT ALL RESOURCE\PROJECT ALL RESOURCE\Implementation\PYTHON FILES\facerecognition.py", line 73, in <module>
(J.left(), J.bottom()),font, 200) # Writing the name of the face recognized
TypeError: Required argument 'color' (pos 6) not found
The code:
import cv2, dlib
import os, time
import openface, urllib
import pandas as pd
from gv import groupid
import sys
import math
import sqlite3
from PIL import Image
from playsound import playsound
from openpyxl import Workbook, load_workbook
from openpyxl.utils import get_column_letter, cell, column_index_from_string
cam = cv2.VideoCapture(0)
#get current date
currentDate = time.strftime("%d_%m_%y")
detector = dlib.get_frontal_face_detector()
dlibFacePredictor = 'shape_predictor_68_face_landmarks.dat' # Path to dlib's face predictor
align = openface.AlignDlib(dlibFacePredictor)
subject=input("Enter Subject:")
a=['BI','SIC','PGIS','SQA','ITSM']
path= 'C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/images/'
sc=subject+currentDate
if subject in a:
    print("Success")
wbook = load_workbook(filename = "C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/Attendance/"+subject+".xlsx")
sheet = wbook.get_sheet_by_name('TYBSCIT'+subject)
def getDateColumns():
    # find the column whose header (row 1) matches today's date
    for i in range(1, len(sheet.rows[0]) + 1):
        cols = get_column_letter(i)
        if sheet.cell('%s%s' % (cols, '1')).value == currentDate:
            return cols
def getProfileId(Ids):
    # look up the student record for the given ID in the SQLite database
    connect = sqlite3.connect("C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/sqlite3/Studentdb.db")
    cmd = "SELECT * FROM Students WHERE ID=" + str(Ids)
    cursor = connect.execute(cmd)
    profile = None
    for row in cursor:
        profile = row
    connect.close()
    return profile
attend = [0 for i in range(60)]
rec = cv2.face.LBPHFaceRecognizer_create() # Local Binary Patterns Histograms
rec.read('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/Training/trainingData.yml') # loading the trained data
picNumber = 2
image = cv2.imread(path + "/" + sc + ".jpg")
currentDir = os.path.dirname(os.path.abspath(__file__))
directory = os.path.join(currentDir, 'Extracted_Faces')
font = cv2.FONT_HERSHEY_SIMPLEX # font for the text drawn on recognized faces
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converting the loaded image to grayscale
dets = detector(image, 0)
totalConf = 0.0
faceRec = 0
for i, J in enumerate(dets):
    image2 = image[J.top():J.bottom(), J.left():J.right()]
    rgbImg = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
    bb = align.getLargestFaceBoundingBox(rgbImg)
    alignedFace = align.align(96, rgbImg, bb=None, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    alignedFace = cv2.cvtColor(alignedFace, cv2.COLOR_BGR2GRAY) # converting the aligned face to grayscale
    Ids, conf = rec.predict(alignedFace) # comparing against the trained data
    if conf < 50:
        totalConf += conf
        faceRec += 1
    profile = getProfileId(Ids)
    if profile != None:
        cv2.putText(image,
                    profile[1] + str("(%.2f)" % conf),
                    (d.left(), d.bottom()), font, (0, 0, 0)) # writing the name of the recognized face
    else:
        cv2.putText(image,
                    "Unknown" + str(conf),
                    (J.left(), J.bottom()), font, 200) # writing "Unknown" for an unrecognized face
    # cv2.imwrite(picFolderName, img[d.top():d.bottom(), d.left():d.right()])
    cv2.rectangle(image, (d.left(), d.top()), (d.right(), d.bottom()), (255, 255, 255), 2)
    cv2.imshow('Frames', image) # showing each frame in the window
    cv2.imwrite(path + '/verification' + str ...
Looking up the docs for cv2.putText(): the signature is cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]). Both calls above pass only five positional arguments, so the required color (position 6) is never supplied: in the else branch 200 is read as fontScale, and in the first branch the (0, 0, 0) tuple lands in the fontScale slot.
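A minimal sketch of how the two calls could be corrected, assuming a font scale of 1.0 and the colors already hinted at in the code; the stray d.left()/d.top() references are also assumed to be typos for the loop variable J:

    if profile != None:
        cv2.putText(image,
                    profile[1] + str("(%.2f)" % conf),
                    (J.left(), J.bottom()), font,
                    1.0,               # fontScale, the argument that was missing
                    (0, 0, 0), 2)      # color in BGR, then thickness
    else:
        cv2.putText(image,
                    "Unknown" + str(conf),
                    (J.left(), J.bottom()), font,
                    1.0,               # fontScale
                    (200, 200, 200), 2)
    cv2.rectangle(image, (J.left(), J.top()), (J.right(), J.bottom()), (255, 255, 255), 2)

Any positive float works for fontScale (it multiplies the font's base size), and color is a BGR tuple, so (0, 0, 255) would, for example, draw the text in red.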