Face Recognition in Python, and Sending a Message Through WhatsApp Once a Face Is Successfully Recognized

Divesh Karkera
Jun 24, 2021 · 5 min read

Dear Readers,

This article walks through the code and explanation for training a face recognition model, detecting faces, and sending a message through WhatsApp whenever a face is recognized.
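A quick note on setup before we start: the code below needs OpenCV built with the contrib modules (that is where cv2.face lives), plus the twilio and pywhatkit packages used later on. Something along these lines should cover it (exact versions are up to you):

# Assumed setup, run from a terminal:
# pip install opencv-contrib-python twilio pywhatkit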

Collection of Samples

import cv2
import numpy as np

# Load the Haar cascade classifier for frontal faces
face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_extractor(img):
    # Detects a face and returns the cropped face region;
    # if no face is detected, it returns None
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Convert the colour image to grayscale for faster detection
    faces = face_classifier.detectMultiScale(gray)
    # detectMultiScale() returns (x, y, w, h) for every face it finds,
    # where (x, y) is the top-left corner, w is the width and h is the height
    if len(faces) == 0:
        # No face detected, so return None
        return None
    for (x, y, w, h) in faces:
        # Crop the detected face (rows are indexed by y, columns by x)
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

cap = cv2.VideoCapture(0)
# Start the webcam
count = 0

# Collect 100 samples of your face from the webcam input
while True:
    ret, frame = cap.read()
    face = face_extractor(frame)
    if face is not None:
        # A face was cropped successfully, so count it as a sample
        count += 1
        face = cv2.resize(face, (200, 200))
        # Resize the face to 200x200 pixels
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Convert the sample to grayscale
        file_name_path = './faces/user/' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        # Save the sample in the specified directory with a unique name
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('Face Cropper', face)
        # Put the live count on the image and show the window
    else:
        print("Face not found")

    if cv2.waitKey(1) == 13 or count == 100:
        # 13 is the Enter key
        break

cap.release()
# Close the camera
cv2.destroyAllWindows()
print("Collecting Samples Complete")
print(frame.shape)
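One thing worth flagging: the code above assumes haarcascade_frontalface_default.xml sits in your working directory. If you installed OpenCV through pip, you can load the bundled copy from cv2.data.haarcascades instead; a minimal sketch:

import cv2

# Load the frontal-face cascade that ships with the pip package of OpenCV
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_classifier = cv2.CascadeClassifier(cascade_path)
assert not face_classifier.empty(), 'Cascade failed to load - check the path'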

Training Model

import cv2
import numpy as np
from os import listdir
from os.path import isfile, join

# Get the training data we previously collected
data_path = './faces/user/'   # our main folder
onlyfiles = [f for f in listdir(data_path) if isfile(join(data_path, f))]
# listdir(data_path) lists everything inside that folder;
# isfile(join(data_path, f)) keeps only the entries that are files,
# so onlyfiles ends up holding the file names of all saved face samples

# Create arrays for training data and labels
Training_Data, Labels = [], []

# Open the training images in our data path, one by one
for i, files in enumerate(onlyfiles):
    image_path = data_path + onlyfiles[i]
    # Full path of the current image
    images = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    # Read the image at that path in grayscale
    Training_Data.append(np.asarray(images, dtype=np.uint8))
    # Append the image to the training data as a numpy array
    Labels.append(i)
    # Append the corresponding label

Labels = np.asarray(Labels, dtype=np.int32)
# Convert the labels list into a numpy array

# Initialize the LBPH facial recognizer
# NOTE: cv2.face requires the contrib build: pip install opencv-contrib-python
facemodel = cv2.face.LBPHFaceRecognizer_create()
# Creating the model
facemodel.train(np.asarray(Training_Data), np.asarray(Labels))
# Training the model with the training data and corresponding labels
print("Model trained successfully")
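A side note on saving: an LBPH recognizer is not a Keras model, so a .h5 save will not work. OpenCV's face recognizers persist themselves to YAML/XML through write() and read(); a minimal sketch (the file name here is my own choice):

# Save the trained LBPH model in OpenCV's own YAML format
facemodel.write('face_recognition_model.yml')

# Later, recreate the recognizer and load the saved state back in
facemodel = cv2.face.LBPHFaceRecognizer_create()
facemodel.read('face_recognition_model.yml')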

Face Recognition

import cv2
import numpy as np
import os

face_classifier = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

def face_detector(img, size=0.5):
    # Convert image to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1.3, 5)
    if len(faces) == 0:
        # No face detected: return the frame and an empty list,
        # since we have no values of x, y, w, h
        return img, []
    for (x, y, w, h) in faces:
        # Draw a rectangle on the border of the face
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 255), 2)
        roi = img[y:y+h, x:x+w]
        roi = cv2.resize(roi, (200, 200))
    return img, roi
    # Return the annotated frame and the cropped face

# Open the webcam
cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    image, face = face_detector(frame)
    try:
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        # Pass the face to the prediction model;
        # "results" is a tuple of (label, distance) where a lower distance means a closer match
        results = facemodel.predict(face)
        print(results)
        confidence = int(100 * (1 - (results[1]) / 400))
        # Heuristic to turn the distance into a confidence percentage
        display_string = str(confidence) + '% Confident it is User'
        print(confidence)
        cv2.putText(image, display_string, (100, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 120, 150), 2)
        if confidence > 80:
            cv2.putText(image, "Hey Yash", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow('Face Recognition', image)
            # Other actions could be triggered here instead, e.g.:
            # os.system("chrome https://www.google.com/")
            # os.system("wmplayer c:\\<filename>.mp3")
            break
        else:
            cv2.putText(image, "I dont know, how r u", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
            cv2.imshow('Face Recognition', image)
    except Exception:
        cv2.putText(image, "No Face Found", (220, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.putText(image, "looking for face", (250, 450), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
        cv2.imshow('Face Recognition', image)

    if cv2.waitKey(1) == 13:
        # 13 is the Enter key
        break

cap.release()
cv2.destroyAllWindows()
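To make the confidence formula a little less mysterious: predict() returns a (label, distance) pair, and a smaller distance means the face is closer to the training samples. The 1 - distance/400 line is just a heuristic for turning that distance into a percentage; for example:

# Illustration of the confidence mapping (example numbers, not real output)
label, distance = 0, 60.0
confidence = int(100 * (1 - distance / 400))   # 60 -> 85, i.e. "85% Confident it is User"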

WhatsApp Module

# Here, I have used Twilio for sending a message through WhatsApp.
from twilio.rest import Client

account_sid = 'Account SID'
auth_token = 'Authentication Token'
client = Client(account_sid, auth_token)

message = client.messages.create(
    from_='whatsapp:+1234567890',
    body='Message from Python',
    to='whatsapp:+1234567890'
)
print(message.sid)

OR

import pywhatkit

pywhatkit.sendwhatmsg_instantly(phone_no="+919987371229",
                                message="Hi, I am Yash. This is a WhatsApp message sent with Python.")
print("Whatsapp Message sent Successfully!!")
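Neither snippet is wired into the recognition loop yet. One way to connect them, assuming the Twilio setup above, is to wrap the send in a small helper and call it right after the confidence check succeeds (the helper name and placement are my own):

from twilio.rest import Client

def send_whatsapp_alert(body):
    # Assumed helper: reuses the Twilio credentials and sandbox numbers from above
    client = Client('Account SID', 'Authentication Token')
    message = client.messages.create(
        from_='whatsapp:+1234567890',
        body=body,
        to='whatsapp:+1234567890'
    )
    print("WhatsApp message sent:", message.sid)

# In the recognition loop, inside `if confidence > 80:` and before `break`:
# send_whatsapp_alert('Face recognized successfully!')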

Mailing Module

import pywhatkit

pywhatkit.send_mail(email_sender="<email id>",
                    password="<password>",
                    subject="Automated E-mail",
                    message="System generated mail using OpenCV face detection.",
                    email_receiver="<receiver email id>")

AWS Module

import os
import time

# Create an EC2 instance in AWS
os.system("aws ec2 run-instances --image-id ami-011c99152163a87ae --instance-type t2.micro --subnet-id subnet-06f7036d --count 1 --security-group-ids sg-5771be2b > ec2.txt")
print("Instance Launched")
# Create an EBS volume of size 5 GB
os.system("aws ec2 create-volume --availability-zone ap-south-1a --size 5 --volume-type gp2 --tag-specification ResourceType=volume,Tags=[{Key=face,Value=volume}] > ebs.txt")
print("Volume Created of size 5 GB")
print("Attaching in 120 seconds")
time.sleep(120)
# Pull the instance and volume IDs out of the saved CLI output
ec2_id = open("ec2.txt", 'r').read().split(',')[3].split(':')[1].split('"')[1]
ebs_id = open("ebs.txt", 'r').read().split(',')[6].split(':')[1].split('"')[1]
os.system("aws ec2 attach-volume --instance-id " + ec2_id + " --volume-id " + ebs_id + " --device /dev/sdf")
print("Volume Successfully attached to the instance")
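The string splitting above is fragile because it depends on the exact layout of the CLI's JSON output. A sturdier alternative (just a sketch, reusing the same commands) is to ask the AWS CLI for only the field you need with --query and --output text, and read it through subprocess:

import subprocess

# Launch the instance and capture only its InstanceId (--query is a JMESPath filter)
ec2_id = subprocess.check_output(
    ["aws", "ec2", "run-instances",
     "--image-id", "ami-011c99152163a87ae",
     "--instance-type", "t2.micro",
     "--subnet-id", "subnet-06f7036d",
     "--count", "1",
     "--security-group-ids", "sg-5771be2b",
     "--query", "Instances[0].InstanceId",
     "--output", "text"]
).decode().strip()

# Create the volume and capture only its VolumeId
ebs_id = subprocess.check_output(
    ["aws", "ec2", "create-volume",
     "--availability-zone", "ap-south-1a",
     "--size", "5", "--volume-type", "gp2",
     "--query", "VolumeId",
     "--output", "text"]
).decode().strip()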

If you liked this post, a little extra motivation in the form of some claps 👏 would be much appreciated.

You can reach me at:

LinkedIn: https://www.linkedin.com/in/divesh-karkera-214586179
