# Standalone script: blink and yawn detection from a webcam using dlib's 68-point facial landmarks.
import cv2 as cv
import dlib
import imutils
import numpy as np
from imutils import face_utils as fu
def eye_aspect_ratio(eye):
    # EAR = (|p2 - p6| + |p3 - p5|) / (2 * |p1 - p4|) over the six eye landmarks.
    a = np.linalg.norm(eye[1] - eye[5])
    b = np.linalg.norm(eye[2] - eye[4])
    c = np.linalg.norm(eye[0] - eye[3])
    d = (a + b) / (2.0 * c)
    return d


def mouth_aspect_ratio(mouth):
    # MAR: vertical lip openings divided by horizontal mouth widths.
    a = np.linalg.norm(mouth[3] - mouth[9])
    b = np.linalg.norm(mouth[14] - mouth[18])
    c = np.linalg.norm(mouth[0] - mouth[6])
    d = np.linalg.norm(mouth[12] - mouth[16])
    e = (a + b) / (c + d)
    return e
EYE_COUNTER = 0
EYE_TOTAL = 0
MOUTH_COUNTER = 0
MOUTH_TOTAL = 0
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
detector = dlib.get_frontal_face_detector()
# dlib.shape_predictor needs the 68-point landmark model file; adjust the path to your local copy.
predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
(lStart, lEnd) = fu.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = fu.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = fu.FACIAL_LANDMARKS_IDXS["mouth"]
while cap.isOpened():
    _, frame = cap.read()
    frame = imutils.resize(frame, width=750)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = detector(gray, 0)
    for face in faces:
        shape = predictor(gray, face)
        shape = fu.shape_to_np(shape)
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        earAVG = (leftEAR + rightEAR) / 2.0
        leftEyeHull = cv.convexHull(leftEye)
        rightEyeHull = cv.convexHull(rightEye)
        cv.drawContours(frame, [leftEyeHull], -1, (255, 255, 255), 1)
        cv.drawContours(frame, [rightEyeHull], -1, (255, 255, 255), 1)
        Mouth = shape[mStart:mEnd]
        mar = mouth_aspect_ratio(Mouth)
        MouthHull = cv.convexHull(Mouth)
        cv.drawContours(frame, [MouthHull], -1, (255, 255, 255), 1)
        '''
        If the eye aspect ratio stays below 0.3 for at least 3 consecutive
        frames, count it as one blink (eyes closed).
        '''
        if earAVG < 0.3:
            EYE_COUNTER += 1
        else:
            if EYE_COUNTER >= 3:
                EYE_TOTAL += 1
            EYE_COUNTER = 0
        '''
        If the mouth aspect ratio stays above 0.5 for at least 3 consecutive
        frames, count it as one yawn.
        '''
        if mar > 0.5:
            MOUTH_COUNTER += 1
        else:
            if MOUTH_COUNTER >= 3:
                MOUTH_TOTAL += 1
            MOUTH_COUNTER = 0
        cv.putText(
            frame,
            "Blinks:{0}".format(EYE_TOTAL),
            (10, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        cv.putText(
            frame,
            "earAVG:{0}".format(earAVG),
            (200, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        cv.putText(
            frame,
            "Yawning:{0}".format(MOUTH_TOTAL),
            (10, 50),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
        cv.putText(
            frame,
            "mar:{0}".format(mar),
            (200, 50),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (255, 255, 255),
            2,
            cv.LINE_AA
        )
    cv.putText(
        frame,
        "Press 'Esc' to Quit",
        (515, 550),
        cv.FONT_HERSHEY_COMPLEX_SMALL,
        1,
        (255, 255, 255),
        2,
        cv.LINE_AA
    )
    cv.imshow('camera', frame)
    if cv.waitKey(1) & 0xff == 27:
        break
cv.destroyAllWindows()
cap.release()
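

# ==== Main detector script: combines the eye, mouth, and head-pose modules defined below ====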
import cv2 as cv
import dlib
import imutils
from imutils import face_utils as fu
import eye
import head
import mouth
EYE_COUNTER = 0
EYE_TOTAL = 0
ALWAYS_CLOSE_EYES_COUNTER = 0   # consecutive frames with eyes closed (drives the eye warning)
AL_eye_flag = False
hCOUNTER = 0
hTOTAL = 0
ALWAYS_CLOSE_HEAD_COUNTER = 0   # consecutive frames with the head pitched down (drives the head warning)
AL_head_flag = False
MOUTH_COUNTER = 0
MOUTH_TOTAL = 0
cap = cv.VideoCapture(0, cv.CAP_DSHOW)
detector = dlib.get_frontal_face_detector()
# dlib.shape_predictor needs the 68-point landmark model file; adjust the path to your local copy.
predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')
while cap.isOpened():
    _, frame = cap.read()
    frame = imutils.resize(frame, width=750)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = detector(gray, 0)
    for face in faces:
        shape = predictor(gray, face)
        shape = fu.shape_to_np(shape)
        earAVG = eye.eye_detecting(frame, shape)
        mar = mouth.mouth_detecting(frame, shape)
        reprojectdst, euler_angle = head.get_head_pose(shape)
        '''
        If the eye aspect ratio stays below 0.3 for at least 3 consecutive
        frames, count it as one blink (eyes closed).
        '''
        if earAVG < 0.3:
            EYE_COUNTER += 1
            ALWAYS_CLOSE_EYES_COUNTER += 1
        else:
            if EYE_COUNTER >= 3:
                EYE_TOTAL += 1
            EYE_COUNTER = 0
            ALWAYS_CLOSE_EYES_COUNTER = 0
        if ALWAYS_CLOSE_EYES_COUNTER >= 18:
            AL_eye_flag = True
        else:
            AL_eye_flag = False
        '''
        If the mouth aspect ratio stays above 0.5 for at least 3 consecutive
        frames, count it as one yawn.
        '''
        if mar > 0.5:
            MOUTH_COUNTER += 1
        else:
            if MOUTH_COUNTER >= 3:
                MOUTH_TOTAL += 1
            MOUTH_COUNTER = 0
        '''
        If the head pitch angle exceeds 15 degrees for at least 3 consecutive
        frames, count it as one nod.
        '''
        har = euler_angle[0, 0]
        if har > 15:
            hCOUNTER += 1
            ALWAYS_CLOSE_HEAD_COUNTER += 1
        else:
            if hCOUNTER >= 3:
                hTOTAL += 1
            hCOUNTER = 0
            ALWAYS_CLOSE_HEAD_COUNTER = 0
        if ALWAYS_CLOSE_HEAD_COUNTER >= 18:
            AL_head_flag = True
        else:
            AL_head_flag = False
        cv.putText(
            frame,
            "Blinks:{0}".format(EYE_TOTAL),
            (10, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (0, 0, 255),
            2,
            cv.LINE_AA
        )
        cv.putText(
            frame,
            "earAVG:{0}".format(earAVG),
            (200, 20),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (0, 0, 255),
            2,
            cv.LINE_AA
        )
        if AL_eye_flag:
            cv.putText(
                frame,
                "WARNING EYE",
                (200, 150),
                cv.FONT_HERSHEY_COMPLEX_SMALL,
                2,
                (100, 100, 255),
                2,
                cv.LINE_AA
            )
        cv.putText(
            frame,
            "Yawning:{0}".format(MOUTH_TOTAL),
            (10, 50),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (0, 0, 255),
            2,
            cv.LINE_AA
        )
        cv.putText(
            frame,
            "mar:{0}".format(mar),
            (200, 50),
            cv.FONT_HERSHEY_COMPLEX_SMALL,
            1,
            (0, 0, 255),
            2,
            cv.LINE_AA
        )
        # Draw the re-projected 3D cube that visualises the head pose.
        for start, end in head.line_pairs:
            a = reprojectdst[start]
            b = reprojectdst[end]
            x1 = int(a[0])
            y1 = int(a[1])
            x2 = int(b[0])
            y2 = int(b[1])
            cv.line(frame, (x1, y1), (x2, y2), (0, 0, 255), 1)
        cv.putText(
            frame,
            "X: " + "{:7.2f}".format(euler_angle[0, 0]),
            (10, 90),
            cv.FONT_HERSHEY_SIMPLEX,
            0.75,
            (0, 255, 0),
            2
        )
        cv.putText(
            frame,
            "Y: " + "{:7.2f}".format(euler_angle[1, 0]),
            (150, 90),
            cv.FONT_HERSHEY_SIMPLEX,
            0.75,
            (255, 0, 0),
            2
        )
        cv.putText(
            frame,
            "Z: " + "{:7.2f}".format(euler_angle[2, 0]),
            (300, 90),
            cv.FONT_HERSHEY_SIMPLEX,
            0.75,
            (0, 0, 255),
            2
        )
        cv.putText(
            frame,
            "Nod: {}".format(hTOTAL),
            (450, 90),
            cv.FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 0),
            2
        )
        cv.putText(
            frame,
            "har: {}".format(har),
            (10, 120),
            cv.FONT_HERSHEY_SIMPLEX,
            0.7,
            (255, 255, 0),
            2
        )
        if AL_head_flag:
            cv.putText(
                frame,
                "WARNING HEAD",
                (200, 250),
                cv.FONT_HERSHEY_COMPLEX_SMALL,
                2,
                (100, 100, 255),
                2,
                cv.LINE_AA
            )
    cv.putText(
        frame,
        "Press 'Esc' to Quit",
        (515, 550),
        cv.FONT_HERSHEY_COMPLEX_SMALL,
        1,
        (255, 0, 0),
        2,
        cv.LINE_AA
    )
    cv.imshow('camera', frame)
    if cv.waitKey(1) & 0xff == 27:
        break
cv.destroyAllWindows()
cap.release()
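

# ==== head module (imported above as `head`): head-pose estimation via solvePnP ====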
import math
import cv2 as cv
import numpy as np
# 3D reference coordinates of 14 facial points (brows, eye corners, nose wings,
# mouth corners, lower lip, chin) in an arbitrary head-model frame.
object_pts = np.float32([[6.825897, 6.760612, 4.402142],
                         [1.330353, 7.122144, 6.903745],
                         [-1.330353, 7.122144, 6.903745],
                         [-6.825897, 6.760612, 4.402142],
                         [5.311432, 5.485328, 3.987654],
                         [1.789930, 5.393625, 4.413414],
                         [-1.789930, 5.393625, 4.413414],
                         [-5.311432, 5.485328, 3.987654],
                         [2.005628, 1.409845, 6.165652],
                         [-2.005628, 1.409845, 6.165652],
                         [2.774015, -2.080775, 5.048531],
                         [-2.774015, -2.080775, 5.048531],
                         [0.000000, -3.116408, 6.097667],
                         [0.000000, -7.415691, 4.070434]])
# Approximate intrinsics for a generic 640x480 webcam (fx = fy ~ 653, cx ~ 319.5, cy ~ 239.5).
K = [6.5308391993466671e+002, 0.0, 3.1950000000000000e+002,
     0.0, 6.5308391993466671e+002, 2.3950000000000000e+002,
     0.0, 0.0, 1.0]
# Matching lens distortion coefficients (k1, k2, p1, p2, k3).
D = [7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000]
cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)
dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)
# Corners of a 3D cube centred on the head model, re-projected to visualise the pose.
reprojectsrc = np.float32([[10.0, 10.0, 10.0],
                           [10.0, 10.0, -10.0],
                           [10.0, -10.0, -10.0],
                           [10.0, -10.0, 10.0],
                           [-10.0, 10.0, 10.0],
                           [-10.0, 10.0, -10.0],
                           [-10.0, -10.0, -10.0],
                           [-10.0, -10.0, 10.0]])
# Index pairs giving the 12 edges of the cube.
line_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],
              [4, 5], [5, 6], [6, 7], [7, 4],
              [0, 4], [1, 5], [2, 6], [3, 7]]
def get_head_pose(shape):
    # The 14 corresponding 2D landmarks: brows, eye corners, nose wings, mouth corners, lower lip, chin.
    image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],
                            shape[39], shape[42], shape[45], shape[31], shape[35],
                            shape[48], shape[54], shape[57], shape[8]])
    _, rotation_vec, translation_vec = cv.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)
    reprojectdst, _ = cv.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix, dist_coeffs)
    reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))
    rotation_mat, _ = cv.Rodrigues(rotation_vec)
    pose_mat = cv.hconcat((rotation_mat, translation_vec))
    # euler_angle holds (pitch, yaw, roll) in degrees, one per row; the caller reads it directly.
    _, _, _, _, _, _, euler_angle = cv.decomposeProjectionMatrix(pose_mat)
    # Wrap each angle into (-90, 90) to avoid jumps near +/-180 degrees.
    pitch, yaw, roll = [math.radians(angle) for angle in euler_angle]
    pitch = math.degrees(math.asin(math.sin(pitch)))
    roll = -math.degrees(math.asin(math.sin(roll)))
    yaw = math.degrees(math.asin(math.sin(yaw)))
    return reprojectdst, euler_angle
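

# ==== eye module (imported above as `eye`): eye-aspect-ratio (EAR) computation and drawing ====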
import cv2 as cv
import numpy as np
from imutils import face_utils as fu
def eye_aspect_ratio(eye):
    # EAR = (|p2 - p6| + |p3 - p5|) / (2 * |p1 - p4|) over the six eye landmarks.
    a = np.linalg.norm(eye[1] - eye[5])
    b = np.linalg.norm(eye[2] - eye[4])
    c = np.linalg.norm(eye[0] - eye[3])
    d = (a + b) / (2.0 * c)
    return d


def eye_detecting(frame, shape):
    # Average the EAR of both eyes and draw their convex hulls on the frame.
    (lStart, lEnd) = fu.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = fu.FACIAL_LANDMARKS_IDXS["right_eye"]
    leftEye = shape[lStart:lEnd]
    rightEye = shape[rStart:rEnd]
    leftEAR = eye_aspect_ratio(leftEye)
    rightEAR = eye_aspect_ratio(rightEye)
    earAVG = (leftEAR + rightEAR) / 2.0
    leftEyeHull = cv.convexHull(leftEye)
    rightEyeHull = cv.convexHull(rightEye)
    cv.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
    cv.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
    return earAVG
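

# ==== mouth module (imported above as `mouth`): mouth-aspect-ratio (MAR) computation and drawing ====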
import cv2 as cv
import numpy as np
from imutils import face_utils as fu
def mouth_aspect_ratio(mouth):
    # MAR: vertical lip openings divided by horizontal mouth widths.
    a = np.linalg.norm(mouth[3] - mouth[9])
    b = np.linalg.norm(mouth[14] - mouth[18])
    c = np.linalg.norm(mouth[0] - mouth[6])
    d = np.linalg.norm(mouth[12] - mouth[16])
    e = (a + b) / (c + d)
    return e


def mouth_detecting(frame, shape):
    # Compute the MAR and draw the mouth's convex hull on the frame.
    (mStart, mEnd) = fu.FACIAL_LANDMARKS_IDXS["mouth"]
    Mouth = shape[mStart:mEnd]
    mar = mouth_aspect_ratio(Mouth)
    MouthHull = cv.convexHull(Mouth)
    cv.drawContours(frame, [MouthHull], -1, (0, 255, 0), 1)
    return mar