New oriented object structure
This commit is contained in:
parent f7dd4fb73e
commit ad07fc1c54
3 changed files with 98 additions and 106 deletions
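The change replaces the monolithic EyesDetector and PupilDetector classes with one Eye object and one Pupil object per eye: each Eye isolates its own frame, computes its blinking ratio and builds its Pupil, while GazeTracking takes over the dlib face detector and landmark predictor and keeps the public API (refresh, pupil coordinates, gaze ratios, blinking test, annotated frame). Judging by the imports, the three changed files are the eye, gaze-tracking and pupil modules of the gaze_tracking package.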
gaze_tracking
@@ -1,29 +1,24 @@
-import os
 import math
 import numpy as np
 import cv2
-import dlib
+from .pupil import Pupil


-class EyesDetector(object):
+class Eye(object):
     """
-    This class detects the position of the eyes of a face,
-    and creates two new frames to isolate each eye.
+    This class creates a new frame to isolate the eye and
+    initiates the pupil detection.
     """

-    LEFT_EYE_POINTS = [36, 37, 38, 39, 40, 41]
-    RIGHT_EYE_POINTS = [42, 43, 44, 45, 46, 47]
+    def __init__(self, original_frame, landmarks, points):
+        self.frame = None
+        self.origin = None
+        self.center = None
+        self.blinking = self._blinking_ratio(landmarks, points)
+        self.pupil = None

-    def __init__(self):
-        self.frame_left = None
-        self.frame_left_origin = None
-        self.frame_right = None
-        self.frame_right_origin = None
-        self.blinking = None
-        self._face_detector = dlib.get_frontal_face_detector()
-        cwd = os.path.abspath(os.path.dirname(__file__))
-        model_path = os.path.abspath(os.path.join(cwd, "trained_models/shape_predictor_68_face_landmarks.dat"))
-        self._predictor = dlib.shape_predictor(model_path)
+        self._isolate(original_frame, landmarks, points)
+        self.pupil = Pupil(self.frame)

     @staticmethod
     def _middle_point(p1, p2):
@@ -37,17 +32,13 @@ class EyesDetector(object):
         y = int((p1.y + p2.y) / 2)
         return (x, y)

-    @staticmethod
-    def isolate_eye(frame, landmarks, points):
+    def _isolate(self, frame, landmarks, points):
         """Isolate an eye, to have a frame without other part of the face.

         Arguments:
             frame (numpy.ndarray): Frame containing the face
             landmarks (dlib.full_object_detection): Facial landmarks for the face region
             points (list): Points of an eye (from the 68 Multi-PIE landmarks)
-
-        Returns:
-            A tuple with the eye frame and its origin
         """
         region = np.array([(landmarks.part(point).x, landmarks.part(point).y) for point in points])
         region = region.astype(np.int32)
@@ -65,11 +56,14 @@ class EyesDetector(object):
         max_x = np.max(region[:, 0]) + margin
         min_y = np.min(region[:, 1]) - margin
         max_y = np.max(region[:, 1]) + margin
-        roi = eye[min_y:max_y, min_x:max_x]

-        return (roi, (min_x, min_y))
+        self.frame = eye[min_y:max_y, min_x:max_x]
+        self.origin = (min_x, min_y)

-    def blinking_ratio(self, landmarks, points):
+        height, width = self.frame.shape[:2]
+        self.center = (width / 2, height / 2)
+
+    def _blinking_ratio(self, landmarks, points):
         """Calculates a ratio that can indicate whether an eye is closed or not.
         It's the division of the width of the eye, by its height.

@@ -86,21 +80,3 @@ class EyesDetector(object):
         eye_height = math.hypot((top[0] - bottom[0]), (top[1] - bottom[1]))

         return eye_width / eye_height
-
-    def process(self, frame):
-        """Run eyes detection"""
-        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-        faces = self._face_detector(frame_gray)
-
-        try:
-            landmarks = self._predictor(frame, faces[0])
-
-            self.frame_left, self.frame_left_origin = self.isolate_eye(frame_gray, landmarks, self.LEFT_EYE_POINTS)
-            self.frame_right, self.frame_right_origin = self.isolate_eye(frame_gray, landmarks, self.RIGHT_EYE_POINTS)
-
-            blinking_left = self.blinking_ratio(landmarks, self.LEFT_EYE_POINTS)
-            blinking_right = self.blinking_ratio(landmarks, self.RIGHT_EYE_POINTS)
-            self.blinking = (blinking_left + blinking_right) / 2
-
-        except IndexError:
-            pass
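The new Eye class takes a grayscale frame, the dlib landmarks and the six landmark indices of one eye, and exposes frame, origin, center, blinking and pupil. Below is a minimal sketch (not part of the commit) of building an Eye directly, mirroring what GazeTracking._analyze() does in the next file of this diff; the import path, image file and model location are assumptions.

    import cv2
    import dlib
    from gaze_tracking.eye import Eye  # module path assumed from "from .eye import Eye"

    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("trained_models/shape_predictor_68_face_landmarks.dat")  # assumed local copy of the model

    # Hypothetical input image, converted to grayscale as GazeTracking does
    frame = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2GRAY)
    faces = detector(frame)
    landmarks = predictor(frame, faces[0])

    # 68-landmark indices of the left eye (LEFT_EYE_POINTS in GazeTracking)
    left_eye = Eye(frame, landmarks, [36, 37, 38, 39, 40, 41])

    print(left_eye.origin)                     # top-left corner of the isolated eye frame
    print(left_eye.center)                     # center of that frame, used by the gaze ratios
    print(left_eye.blinking)                   # width/height ratio used by is_blinking()
    print(left_eye.pupil.x, left_eye.pupil.y)  # pupil position inside the eye frame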
@@ -1,6 +1,7 @@
+import os
 import cv2
-from .eyes import EyesDetector
-from .pupil import PupilDetector
+import dlib
+from .eye import Eye


 class GazeTracking(object):
@@ -10,91 +11,113 @@ class GazeTracking(object):
     and the pupil and allows to know if the eyes are open or closed
     """

+    LEFT_EYE_POINTS = [36, 37, 38, 39, 40, 41]
+    RIGHT_EYE_POINTS = [42, 43, 44, 45, 46, 47]
+
     def __init__(self):
         self.frame = None
-        self.eyes = EyesDetector()
-        self.pupil_left = PupilDetector()
-        self.pupil_right = PupilDetector()
+        self.eye_left = None
+        self.eye_right = None
+
+        # _face_detector is used to detect faces
+        self._face_detector = dlib.get_frontal_face_detector()
+
+        # _predictor is used to get facial landmarks of a given face
+        cwd = os.path.abspath(os.path.dirname(__file__))
+        model_path = os.path.abspath(os.path.join(cwd, "trained_models/shape_predictor_68_face_landmarks.dat"))
+        self._predictor = dlib.shape_predictor(model_path)
+
+    @property
+    def pupils_located(self):
+        """Check that the pupils have been located"""
+        try:
+            int(self.eye_left.pupil.x)
+            int(self.eye_left.pupil.y)
+            int(self.eye_right.pupil.x)
+            int(self.eye_right.pupil.y)
+            return True
+        except Exception:
+            return False
+
+    def _analyze(self):
+        """Detects the face and initialize Eye objects"""
+        frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
+        faces = self._face_detector(frame)
+
+        try:
+            landmarks = self._predictor(frame, faces[0])
+            self.eye_left = Eye(frame, landmarks, self.LEFT_EYE_POINTS)
+            self.eye_right = Eye(frame, landmarks, self.RIGHT_EYE_POINTS)
+
+        except IndexError:
+            self.eye_left = None
+            self.eye_right = None

     def refresh(self, frame):
         """Refresh the frame and analyzes it."""
         self.frame = frame
-        self.eyes.process(self.frame)
-        self.pupil_left.process(self.eyes.frame_left)
-        self.pupil_right.process(self.eyes.frame_right)
+        self._analyze()

     def pupil_left_coords(self):
         """Returns the coordinates of the left pupil"""
-        try:
-            x = self.eyes.frame_left_origin[0] + self.pupil_left.x
-            y = self.eyes.frame_left_origin[1] + self.pupil_left.y
+        if self.pupils_located:
+            x = self.eye_left.origin[0] + self.eye_left.pupil.x
+            y = self.eye_left.origin[1] + self.eye_left.pupil.y
             return (x, y)
-        except TypeError:
-            return None

     def pupil_right_coords(self):
         """Returns the coordinates of the right pupil"""
-        try:
-            x = self.eyes.frame_right_origin[0] + self.pupil_right.x
-            y = self.eyes.frame_right_origin[1] + self.pupil_right.y
+        if self.pupils_located:
+            x = self.eye_right.origin[0] + self.eye_right.pupil.x
+            y = self.eye_right.origin[1] + self.eye_right.pupil.y
             return (x, y)
-        except TypeError:
-            return None

     def horizontal_ratio(self):
         """Returns a number between 0.0 and 1.0 that indicates the
         horizontal direction of the gaze. The extreme right is 0.0,
         the center is 0.5 and the extreme left is 1.0
         """
-        try:
-            pupil_right = self.pupil_right.x / (self.pupil_right.center[0] * 2 - 10)
-            pupil_left = self.pupil_left.x / (self.pupil_left.center[0] * 2 - 10)
-            return (pupil_right + pupil_left) / 2
-        except TypeError:
-            return None
+        if self.pupils_located:
+            pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)
+            pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)
+            return (pupil_left + pupil_right) / 2

     def vertical_ratio(self):
         """Returns a number between 0.0 and 1.0 that indicates the
         vertical direction of the gaze. The extreme top is 0.0,
         the center is 0.5 and the extreme bottom is 1.0
         """
-        try:
-            pupil_right = self.pupil_right.y / (self.pupil_right.center[1] * 2 - 10)
-            pupil_left = self.pupil_left.y / (self.pupil_left.center[1] * 2 - 10)
-            return (pupil_right + pupil_left) / 2
-        except TypeError:
-            return None
+        if self.pupils_located:
+            pupil_left = self.eye_left.pupil.y / (self.eye_left.center[1] * 2 - 10)
+            pupil_right = self.eye_right.pupil.y / (self.eye_right.center[1] * 2 - 10)
+            return (pupil_left + pupil_right) / 2

     def is_right(self):
-        """Returns true is the user is looking to the right"""
-        try:
+        """Returns true if the user is looking to the right"""
+        if self.pupils_located:
             return self.horizontal_ratio() <= 0.35
-        except TypeError:
-            return None

     def is_left(self):
-        """Returns true is the user is looking to the left"""
-        try:
+        """Returns true if the user is looking to the left"""
+        if self.pupils_located:
             return self.horizontal_ratio() >= 0.65
-        except TypeError:
-            return None

     def is_center(self):
-        """Returns true is the user is looking to the center"""
-        return self.is_right() is not True and self.is_left() is not True
+        """Returns true if the user is looking to the center"""
+        if self.pupils_located:
+            return self.is_right() is not True and self.is_left() is not True

     def is_blinking(self):
         """Returns true if the user closes his eyes"""
-        try:
-            return self.eyes.blinking > 3.8
-        except TypeError:
-            return None
+        if self.pupils_located:
+            blinking_ratio = (self.eye_left.blinking + self.eye_right.blinking) / 2
+            return blinking_ratio > 3.8

     def annotated_frame(self):
-        """Returns the main frame with pupils highlighted """
+        """Returns the main frame with pupils highlighted"""
         frame = self.frame.copy()

-        try:
+        if self.pupils_located:
             color = (0, 255, 0)
             x_left, y_left = self.pupil_left_coords()
             x_right, y_right = self.pupil_right_coords()
@@ -102,7 +125,5 @@ class GazeTracking(object):
             cv2.line(frame, (x_left, y_left - 5), (x_left, y_left + 5), color)
             cv2.line(frame, (x_right - 5, y_right), (x_right + 5, y_right), color)
             cv2.line(frame, (x_right, y_right - 5), (x_right, y_right + 5), color)
-        except TypeError:
-            pass

         return frame
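GazeTracking is now the single entry point: refresh() converts the frame to grayscale and rebuilds the two Eye objects, the pupils_located property guards every accessor (methods simply return None when no face or pupil is found, instead of catching TypeError), and the gaze ratios normalize the pupil position by roughly the width or height of the eye frame (center * 2, minus 10 pixels, presumably to account for the isolation margin), so 0.5 means a centered pupil. A minimal webcam loop follows; the capture and window handling are assumptions, while every gaze method called here appears in the diff above.

    import cv2
    from gaze_tracking import GazeTracking  # package-level import assumed

    gaze = GazeTracking()
    webcam = cv2.VideoCapture(0)  # assumed webcam index

    while True:
        _, frame = webcam.read()
        gaze.refresh(frame)  # detects the face and rebuilds the two Eye objects

        if gaze.is_blinking():
            text = "Blinking"
        elif gaze.is_right():
            text = "Looking right"
        elif gaze.is_left():
            text = "Looking left"
        elif gaze.is_center():
            text = "Looking center"
        else:
            text = "No face detected"

        print(text, gaze.pupil_left_coords(), gaze.pupil_right_coords())
        cv2.imshow("Demo", gaze.annotated_frame())
        if cv2.waitKey(1) == 27:  # Esc quits
            break

    webcam.release()
    cv2.destroyAllWindows()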
@@ -2,18 +2,19 @@ import numpy as np
 import cv2


-class PupilDetector(object):
+class Pupil(object):
     """
     This class detects the iris of an eye and estimates
     the position of the pupil
     """

-    def __init__(self):
-        self.modified_frame = None
-        self.center = None
+    def __init__(self, eye_frame):
+        self.iris_frame = None
         self.x = None
         self.y = None

+        self.detect_iris(eye_frame)
+
     @staticmethod
     def image_processing(eye_frame):
         """Performs operations on the eye frame to isolate the iris
@@ -26,22 +27,16 @@ class PupilDetector(object):
"""
|
||||
kernel = np.ones((3, 3), np.uint8)
|
||||
new_frame = cv2.bilateralFilter(eye_frame, 10, 15, 15)
|
||||
new_frame = cv2.threshold(new_frame, 20, 255, cv2.THRESH_BINARY)[1]
|
||||
new_frame = cv2.erode(new_frame, kernel, iterations=3)
|
||||
new_frame = cv2.dilate(new_frame, kernel, iterations=2)
|
||||
new_frame = cv2.threshold(new_frame, 20, 255, cv2.THRESH_BINARY)[1]
|
||||
|
||||
return new_frame
|
||||
|
||||
def process(self, frame):
|
||||
def detect_iris(self, eye_frame):
|
||||
"""Run iris detection and pupil estimation"""
|
||||
if frame is None:
|
||||
return
|
||||
self.iris_frame = self.image_processing(eye_frame)
|
||||
|
||||
self.modified_frame = self.image_processing(frame)
|
||||
|
||||
height, width = self.modified_frame.shape[:2]
|
||||
self.center = (width / 2, height / 2)
|
||||
|
||||
_, contours, _ = cv2.findContours(self.modified_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
|
||||
_, contours, _ = cv2.findContours(self.iris_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
|
||||
contours = sorted(contours, key=cv2.contourArea)
|
||||
|
||||
try:
|
||||
|
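Pupil now runs its processing once, at construction: image_processing() applies a bilateral filter, an erosion and a single binary threshold (the dilation step is dropped and the threshold moves after the erosion), and detect_iris() takes the contours of the binarized frame sorted by area. Below is a standalone sketch of that chain on a hypothetical eye crop; note that the three-value cv2.findContours signature used in the diff is the OpenCV 3.x form (OpenCV 4 returns only contours and hierarchy).

    import cv2
    import numpy as np

    # Hypothetical grayscale eye crop, such as the frame Eye._isolate() produces
    eye_frame = cv2.imread("eye_crop.png", cv2.IMREAD_GRAYSCALE)

    kernel = np.ones((3, 3), np.uint8)
    new_frame = cv2.bilateralFilter(eye_frame, 10, 15, 15)   # smooth while preserving edges
    new_frame = cv2.erode(new_frame, kernel, iterations=3)   # shrink bright areas so the dark iris blob grows
    new_frame = cv2.threshold(new_frame, 20, 255, cv2.THRESH_BINARY)[1]  # pixels brighter than 20 become white, the iris stays black

    # Contours of the binarized frame, sorted by ascending area (OpenCV 3.x signature)
    _, contours, _ = cv2.findContours(new_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    contours = sorted(contours, key=cv2.contourArea)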