Introduction: OpenCV Face Detection, Training and Recognition

OpenCV is an open-source computer vision library that is very popular for basic image processing tasks such as blurring, image blending, enhancing image and video quality, and thresholding. In addition to image processing, it provides various pre-trained deep learning models that can be used directly to solve simple tasks at hand.
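As a quick illustration of this kind of basic processing, here is a minimal sketch (the file name sample.jpg is just a placeholder, not part of this tutorial) that blurs and thresholds an image with OpenCV:

import cv2

# sample.jpg is a placeholder name for any image in the working directory
img = cv2.imread("sample.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# smooth with a 5x5 Gaussian kernel, then apply a simple binary threshold at 127
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
ret, thresh = cv2.threshold(blurred, 127, 255, cv2.THRESH_BINARY)

cv2.imshow("thresholded", thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()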

For OpenCV installation, follow this link:

https://www.instructables.com/id/Opencv-and-Python...
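If you install OpenCV another way, keep in mind that the LBPH face recognizer used in Step 2 and Step 3 lives in the contrib modules (the opencv-contrib-python package on pip). A small sketch you can run to check that your installation has everything the later steps need:

import cv2

print(cv2.__version__)
# cv2.face is only available when the contrib modules are installed
print(hasattr(cv2, "face"))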

Step 1: Detecting Faces in a Real-Time Video

You can find many face detection programs by searching Google. The detected faces should be saved in a folder for further image processing steps such as training and labeling. We are going to collect 30 samples.

import cv2
import numpy as np
import os
import sys

camera = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")  # add your haarcascade file path

name = raw_input("What's his/her Name? ")  # use input() instead on Python 3

# all the files will be saved under the Users/prasad/Documents/images folder
dirName = "/Users/prasad/Documents/images/" + name
print(dirName)
if not os.path.exists(dirName):
    os.makedirs(dirName)
    print("Directory Created")
else:
    print("Name already exists")
    sys.exit()

# we are going to collect 30 samples
count = 1
while count < 31:
    ret, frame = camera.read()
    if count > 30:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, 1.5, 5)
    for (x, y, w, h) in faces:
        # crop the grayscale face region and save it as a numbered sample
        roiGray = gray[y:y+h, x:x+w]
        fileName = dirName + "/" + name + str(count) + ".jpg"
        cv2.imwrite(fileName, roiGray)
        cv2.imshow("face", roiGray)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        count += 1
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)
    if key == 27:  # press Esc to stop early
        break

camera.release()
cv2.destroyAllWindows()
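If you would rather not hard-code the path to haarcascade_frontalface_default.xml, newer opencv-python builds bundle the cascade files and expose their folder as cv2.data.haarcascades. A hedged alternative, assuming such a build is installed:

import cv2

# build the cascade path from the data folder bundled with opencv-python
cascadePath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
print(faceCascade.empty())  # should print False if the cascade loaded correctly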

Step 2: Training Your Sample Images

Once face detection is complete and the samples are saved, we can train the recognizer on those images.

import os
import numpy as np
from PIL import Image
import cv2
import pickle
#import serial

#ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)

faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()

baseDir = os.path.dirname(os.path.abspath(__file__))
# train on the images under the images folder
imageDir = os.path.join(baseDir, "images")

currentId = 1
labelIds = {}
yLabels = []
xTrain = []
#ser.write("Training.....".encode())

# walk through every person's folder and collect face regions plus numeric labels
for root, dirs, files in os.walk(imageDir):
    print(root, dirs, files)
    for file in files:
        print(file)
        if file.endswith("png") or file.endswith("jpg"):
            path = os.path.join(root, file)
            label = os.path.basename(root)
            print(label)

            if label not in labelIds:
                labelIds[label] = currentId
                print(labelIds)
                currentId += 1

            id_ = labelIds[label]
            pilImage = Image.open(path).convert("L")
            imageArray = np.array(pilImage, "uint8")
            faces = faceCascade.detectMultiScale(imageArray, scaleFactor=1.1, minNeighbors=5)

            for (x, y, w, h) in faces:
                roi = imageArray[y:y+h, x:x+w]
                xTrain.append(roi)
                yLabels.append(id_)

# save the name-to-id mapping for the recognition step
with open("labels", "wb") as f:
    pickle.dump(labelIds, f)

recognizer.train(xTrain, np.array(yLabels))
recognizer.save("trainer.yml")
print(labelIds)
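Before moving on to recognition, it is worth sanity-checking the training output. The short sketch below (assuming the training script above was run in the same folder) reloads the labels file and trainer.yml and prints the name-to-id mapping:

import cv2
import pickle

# reload the saved name-to-id mapping
with open("labels", "rb") as f:
    labelIds = pickle.load(f)
print(labelIds)

# reload the trained LBPH model to confirm trainer.yml was written correctly
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer.yml")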

Step 3: Recognizing Faces

After training is complete, you can run the code below and it will start recognizing your trained faces.

import os
os.environ['PYTHONINSPECT'] = 'on'
import cv2
import numpy as np
import pickle
#import RPi.GPIO as GPIO
from time import sleep

# load the name-to-id mapping saved during training
with open('labels', 'rb') as f:
    dicti = pickle.load(f)

camera = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainer.yml")

font = cv2.FONT_HERSHEY_SIMPLEX
last = ''

while True:
    ret, frame = camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    for (x, y, w, h) in faces:
        roiGray = gray[y:y+h, x:x+w]
        id_, conf = recognizer.predict(roiGray)

        # look up the name that matches the predicted id and draw it on the frame
        for name, value in dicti.items():
            if value == id_:
                print(name)
                cv2.putText(frame, name, (x, y), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
                if name != last:
                    last = name
        if conf <= 70:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)
    if key == 27:  # press Esc to quit
        break

camera.release()
cv2.destroyAllWindows()
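Note that for the LBPH recognizer a lower conf value means a closer match, which is why the loop above only draws the rectangle when conf <= 70. If you also want to flag people the model does not recognize, here is a hedged sketch of a small helper (drawPrediction and its default threshold are illustrative, not part of the original script) that could replace the drawing code inside the face loop:

import cv2

font = cv2.FONT_HERSHEY_SIMPLEX

def drawPrediction(frame, x, y, w, h, name, conf, threshold=70):
    # lower conf means a closer match for the LBPH recognizer
    if conf <= threshold:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.putText(frame, name, (x, y), font, 2, (0, 0, 255), 2, cv2.LINE_AA)
    else:
        # treat distant matches as unknown faces
        cv2.putText(frame, "Unknown", (x, y), font, 2, (0, 0, 255), 2, cv2.LINE_AA)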