-
Notifications
You must be signed in to change notification settings - Fork 1
/
blinks, smiles, eyebrow raises.py
129 lines (97 loc) · 3.38 KB
/
blinks, smiles, eyebrow raises.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import cv2
from imutils import face_utils
import numpy as np
import argparse
import imutils
import dlib
import math
from PIL import Image
from subprocess import call
import os
import threading
import time
import os
from scipy.spatial import distance as dist
#Importing Haar cascade and DLIB's facial landmarks detector
# Both model files must be present in the current working directory,
# or these constructors fail / produce an unusable detector.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Start video capture (webcam)
# Device index 0 = the default system camera.
video = cv2.VideoCapture(0)
# Running total of detected blinks, updated by the main loop below.
blinks = 0
def smile_aspect_ratio(mouth):
    """Return the mouth aspect ratio: mean vertical opening / mouth width.

    mouth: sequence of the mouth landmark (x, y) points from dlib's
    68-point model, re-indexed from 0 (index 0 and 6 are the corners).
    A smile stretches the mouth horizontally, so the ratio DROPS when
    the subject smiles (the caller compares it against 0.20).
    """
    # Three vertical distances across the mouth opening.
    A = dist.euclidean(mouth[3], mouth[9])
    B = dist.euclidean(mouth[2], mouth[10])
    C = dist.euclidean(mouth[4], mouth[8])
    # Average vertical opening.
    L = (A + B + C) / 3
    # Horizontal width, corner to corner.
    D = dist.euclidean(mouth[0], mouth[6])
    return L / D
def eye_aspect_ratio(eye):
    """Return the eye aspect ratio (EAR) for one eye.

    eye: the six (x, y) landmark points of one eye from dlib's
    68-point model, re-indexed from 0 (0 and 3 are the corners).
    The EAR is roughly constant while the eye is open and falls
    toward 0 when it closes, which is what the blink detector uses.
    """
    # compute the euclidean distances between the two sets of
    # vertical eye landmarks (x, y)-coordinates
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = dist.euclidean(eye[0], eye[3])
    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    # return the eye aspect ratio
    return ear
def eyebrowratiofunc(eyebrow, eye):
    """Return an eyebrow-raise ratio for one eyebrow.

    eyebrow: (x, y) landmark points of one eyebrow (at least 3 used).
    eye: the matching eye's landmark points; only eye[1] is used as a
    fixed reference point below the brow.
    The ratio is the mean distance of three brow points to eye[1],
    normalized by the brow's own horizontal span — it grows as the
    eyebrow lifts away from the eye (caller compares against 1.0).
    """
    # Distances from three brow points down to the reference eye point.
    A = dist.euclidean(eyebrow[0], eye[1])
    B = dist.euclidean(eyebrow[1], eye[1])
    C = dist.euclidean(eyebrow[2], eye[1])
    # Brow width, used to make the ratio scale-invariant.
    X = dist.euclidean(eyebrow[0], eyebrow[2])
    return (A + B + C) / (3.0 * X)
# Number of consecutive frames in which the eyes have been below the
# EAR threshold (i.e. closed).
counter = 0

# Main capture loop: grab frames, detect faces with the Haar cascade,
# then use dlib landmarks to report blinks, smiles and eyebrow raises.
# Press 'q' in the video window to quit.
while True:
    ret, frame = video.read()
    # Guard against a failed grab (camera unplugged / stream ended);
    # the original fed a None frame straight into imshow and crashed.
    if not ret:
        break
    cv2.imshow('Original video feed', frame)
    #Convert the frame to grayscale
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #Activating Haar cascade classifier to detect faces
    faces = face_cascade.detectMultiScale(grayFrame, scaleFactor=1.5, minNeighbors=5)
    for (x, y, w, h) in faces:
        # Crop the detected face and resize it to a fixed 300x300 so the
        # landmark geometry (and the thresholds below) is scale-invariant.
        pillowImage = Image.fromarray(frame[y:y + h, x:x + w])
        resizedHeight = 300
        resizedWidth = 300
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        faceCropped = np.array(pillowImage.resize((resizedWidth, resizedHeight), Image.LANCZOS))
        # The crop fills the whole resized image, so the dlib rectangle
        # spans the full frame.
        dlibRect = dlib.rectangle(0, 0, resizedWidth, resizedHeight)
        shape = predictor(cv2.cvtColor(faceCropped, cv2.COLOR_BGR2GRAY), dlibRect)
        shape = face_utils.shape_to_np(shape)

        # --- Blink detection: average EAR over both eyes ---
        (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        ear = (leftEAR + rightEAR) / 2.0
        if ear < 0.25:
            # Eyes closed this frame: extend the run of closed frames.
            counter += 1
        else:
            # Eyes reopened: a run of more than 2 closed frames is one blink.
            # (The original never reset the counter on reopen, so a stale
            # partial count could fire a false blink much later.)
            if counter > 2:
                blinks = blinks + 1
                print("BLINK! (Eye aspect ratio : {} Blink counter : {})".format(ear, blinks))
            counter = 0

        # --- Smile detection: mouth ratio drops when the mouth widens ---
        (mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
        mouth = shape[mStart:mEnd]
        sar = smile_aspect_ratio(mouth)
        if sar < 0.20:
            print("SMILE!")

        # --- Eyebrow raise: brow-to-eye distance grows when raised ---
        (eyebrowStart, eyebrowEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
        eyebrow = shape[eyebrowStart:eyebrowEnd]
        eyebrowratio = eyebrowratiofunc(eyebrow, leftEye)
        if eyebrowratio > 1.0:
            print("EYEBROW!")

    # ~20 ms per frame; 'q' quits.
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

# Release the camera and close the display window on exit.
video.release()
cv2.destroyAllWindows()