Compare commits


1 Commit

Author: Derek
SHA1: c46d5b559b
Message: Remove dlib components (assume user can provide landmarks)
Date: 2021-04-14 20:28:22 -07:00
6 changed files with 20 additions and 85 deletions
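In short: the bundled dlib face detector and landmark predictor are removed, and the public entry point now expects the caller to supply a precomputed 68-point landmark array. A two-line sketch of the change in the calling convention, inferred from the GazeTracking hunks below:

# Before this commit: the library found the face and landmarks itself.
gaze.refresh(frame)

# After this commit: the caller passes a (68, 2) numpy array of landmarks.
gaze.refresh(frame, landmarks)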

View File

@@ -1,41 +0,0 @@
"""
Demonstration of the GazeTracking library.
Check the README.md for complete documentation.
"""
import cv2
from gaze_tracking import GazeTracking
gaze = GazeTracking()
webcam = cv2.VideoCapture(0)
while True:
# We get a new frame from the webcam
_, frame = webcam.read()
# We send this frame to GazeTracking to analyze it
gaze.refresh(frame)
frame = gaze.annotated_frame()
text = ""
if gaze.is_blinking():
text = "Blinking"
elif gaze.is_right():
text = "Looking right"
elif gaze.is_left():
text = "Looking left"
elif gaze.is_center():
text = "Looking center"
cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
left_pupil = gaze.pupil_left_coords()
right_pupil = gaze.pupil_right_coords()
cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
cv2.imshow("Demo", frame)
if cv2.waitKey(1) == 27:
break
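Under the new API the deleted demo above no longer works as written, because the caller must hand the landmarks in. A rough sketch of how the loop could be adapted; this is not part of the commit, and get_landmarks is a hypothetical user-supplied helper returning a (68, 2) NumPy array (one possible dlib-based implementation is sketched after the GazeTracking hunks further down):

import cv2
from gaze_tracking import GazeTracking

gaze = GazeTracking()
webcam = cv2.VideoCapture(0)

while True:
    _, frame = webcam.read()

    # Hypothetical user-supplied step: a (68, 2) array of facial landmarks,
    # or None when no face is visible in this frame.
    landmarks = get_landmarks(frame)

    if landmarks is not None:
        gaze.refresh(frame, landmarks)
        frame = gaze.annotated_frame()

    cv2.imshow("Demo", frame)
    if cv2.waitKey(1) == 27:
        break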

View File

@@ -1 +1 @@
from .gaze_tracking import GazeTracking

View File

@@ -13,37 +13,24 @@ class Eye(object):
    LEFT_EYE_POINTS = [36, 37, 38, 39, 40, 41]
    RIGHT_EYE_POINTS = [42, 43, 44, 45, 46, 47]

    def __init__(self, original_frame, landmarks, side, calibration):
    def __init__(self, original_frame, landmarks, margin, side, calibration):
        self.frame = None
        self.origin = None
        self.center = None
        self.pupil = None
        self.margin = margin

        self._analyze(original_frame, landmarks, side, calibration)

    @staticmethod
    def _middle_point(p1, p2):
        """Returns the middle point (x,y) between two points

        Arguments:
            p1 (dlib.point): First point
            p2 (dlib.point): Second point
        """
        x = int((p1.x + p2.x) / 2)
        y = int((p1.y + p2.y) / 2)
        return (x, y)

    def _isolate(self, frame, landmarks, points):
        """Isolate an eye, to have a frame without other part of the face.

        Arguments:
            frame (numpy.ndarray): Frame containing the face
            landmarks (dlib.full_object_detection): Facial landmarks for the face region
            landmarks (numpy.ndarray): Facial landmarks for the face region
            points (list): Points of an eye (from the 68 Multi-PIE landmarks)
        """
        region = np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in points])
        region = region.astype(np.int32)
        region = landmarks[points].astype(np.int32)

        # Applying a mask to get only the eye
        height, width = frame.shape[:2]
        black_frame = np.zeros((height, width), np.uint8)
@@ -52,11 +39,10 @@ class Eye(object):
        eye = cv2.bitwise_not(black_frame, frame.copy(), mask=mask)

        # Cropping on the eye
        margin = 5
        min_x = np.min(region[:, 0]) - margin
        max_x = np.max(region[:, 0]) + margin
        min_y = np.min(region[:, 1]) - margin
        max_y = np.max(region[:, 1]) + margin
        min_x = np.min(region[:, 0]) - self.margin
        max_x = np.max(region[:, 0]) + self.margin
        min_y = np.min(region[:, 1]) - self.margin
        max_y = np.max(region[:, 1]) + self.margin

        self.frame = eye[min_y:max_y, min_x:max_x]
        self.origin = (min_x, min_y)
@@ -69,16 +55,16 @@ class Eye(object):
        It's the division of the width of the eye, by its height.

        Arguments:
            landmarks (dlib.full_object_detection): Facial landmarks for the face region
            landmarks (numpy.ndarray): Facial landmarks for the face region
            points (list): Points of an eye (from the 68 Multi-PIE landmarks)

        Returns:
            The computed ratio
        """
        left = (landmarks.part(points[0]).x, landmarks.part(points[0]).y)
        right = (landmarks.part(points[3]).x, landmarks.part(points[3]).y)
        top = self._middle_point(landmarks.part(points[1]), landmarks.part(points[2]))
        bottom = self._middle_point(landmarks.part(points[5]), landmarks.part(points[4]))
        left = landmarks[points[0]]
        right = landmarks[points[3]]
        top = np.mean(landmarks[points[1:3]], axis=0)
        bottom = np.mean(landmarks[points[4:6]], axis=0)

        eye_width = math.hypot((left[0] - right[0]), (left[1] - right[1]))
        eye_height = math.hypot((top[0] - bottom[0]), (top[1] - bottom[1]))
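The Eye hunks above swap dlib's part() accessors for plain NumPy indexing on a (68, 2) landmark array: a corner point becomes a single row, and the old _middle_point helper collapses into np.mean over two rows. A minimal sketch of the same arithmetic on a synthetic array (the coordinate values are made up purely for illustration):

import math

import numpy as np

# Synthetic (68, 2) landmark array; only the left-eye rows (36-41) matter here.
landmarks = np.zeros((68, 2), dtype=np.int32)
landmarks[36:42] = [(100, 50), (110, 44), (122, 44), (132, 50), (122, 56), (110, 56)]

points = [36, 37, 38, 39, 40, 41]  # Eye.LEFT_EYE_POINTS

left = landmarks[points[0]]                       # outer corner, was landmarks.part(36)
right = landmarks[points[3]]                      # inner corner, was landmarks.part(39)
top = np.mean(landmarks[points[1:3]], axis=0)     # midpoint of points 37/38, was _middle_point
bottom = np.mean(landmarks[points[4:6]], axis=0)  # midpoint of points 40/41

eye_width = math.hypot(left[0] - right[0], left[1] - right[1])
eye_height = math.hypot(top[0] - bottom[0], top[1] - bottom[1])
print(eye_width / eye_height)  # the blinking ratio, as in Eye._blinking_ratio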

View File

@@ -1,7 +1,6 @@
from __future__ import division
import os
import cv2
import dlib
from .eye import Eye
from .calibration import Calibration
@@ -19,14 +18,6 @@ class GazeTracking(object):
        self.eye_right = None
        self.calibration = Calibration()

        # _face_detector is used to detect faces
        self._face_detector = dlib.get_frontal_face_detector()

        # _predictor is used to get facial landmarks of a given face
        cwd = os.path.abspath(os.path.dirname(__file__))
        model_path = os.path.abspath(os.path.join(cwd, "trained_models/shape_predictor_68_face_landmarks.dat"))
        self._predictor = dlib.shape_predictor(model_path)

    @property
    def pupils_located(self):
        """Check that the pupils have been located"""
@@ -40,26 +31,26 @@ class GazeTracking(object):
            return False

    def _analyze(self):
        """Detects the face and initialize Eye objects"""
        """Initialize Eye objects"""
        frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        faces = self._face_detector(frame)

        try:
            landmarks = self._predictor(frame, faces[0])
            self.eye_left = Eye(frame, landmarks, 0, self.calibration)
            self.eye_right = Eye(frame, landmarks, 1, self.calibration)
            self.eye_left = Eye(frame, self.landmarks, 0, self.calibration)
            self.eye_right = Eye(frame, self.landmarks, 1, self.calibration)

        except IndexError:
            self.eye_left = None
            self.eye_right = None

    def refresh(self, frame):
    def refresh(self, frame, landmarks):
        """Refreshes the frame and analyzes it.

        Arguments:
            frame (numpy.ndarray): The frame to analyze
            landmarks (numpy.ndarray): A full set of 68 face landmarks

        """
        self.frame = frame
        self.landmarks = landmarks
        self._analyze()

    def pupil_left_coords(self):
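With detection gone from the library, producing the (68, 2) landmark array is now the caller's job. A hedged sketch of the get_landmarks helper assumed in the demo sketch near the top, built on dlib used externally and mirroring the code this commit deletes; the function name and the predictor path are mine, and the predictor file is wherever the caller keeps shape_predictor_68_face_landmarks.dat:

import cv2
import dlib
import numpy as np

# Same detector and predictor the library used to construct internally.
_detector = dlib.get_frontal_face_detector()
_predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

def get_landmarks(frame):
    """Return a (68, 2) int array of landmarks for the first detected face, or None."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = _detector(gray)
    if not faces:
        return None
    shape = _predictor(gray, faces[0])
    # Flatten dlib's full_object_detection into the array layout the new code indexes.
    return np.array([(shape.part(i).x, shape.part(i).y) for i in range(68)])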

View File

@@ -1,3 +1,2 @@
numpy == 1.16.1
opencv_python == 3.4.5.20
dlib == 19.16.0