A series of convenience functions to make basic image processing functions such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, detecting edges, and much more easier with OpenCV and both Python 2.7 and Python 3.
91
Facial landmark processing tools and face alignment functionality for dlib-based face detection workflows. These utilities bridge dlib's face detection capabilities with OpenCV's image processing functions.
Automatic face alignment based on eye positions, supporting both 68-point and 5-point facial landmark detectors.
class FaceAligner:
def __init__(self, predictor, desiredLeftEye=(0.35, 0.35), desiredFaceWidth=256, desiredFaceHeight=None):
"""
Face alignment based on eye positions.
Args:
predictor: dlib facial landmark predictor object
desiredLeftEye (tuple): Desired left eye position as ratio (x, y) (default: (0.35, 0.35))
desiredFaceWidth (int): Desired output face width in pixels (default: 256)
desiredFaceHeight (int, optional): Desired output face height in pixels (default: uses desiredFaceWidth)
"""
def align(self, image, gray, rect):
"""
Align and crop face based on eye positions.
Args:
image (np.ndarray): Color input image
gray (np.ndarray): Grayscale version of input image
rect: dlib rectangle object representing face bounding box
Returns:
np.ndarray: Aligned and cropped face image
Note:
Automatically detects whether using 68-point or 5-point landmark detector.
Calculates rotation angle and scale based on eye positions.
Applies affine transformation to align eyes horizontally.
"""Usage Example:
import cv2
import dlib
from imutils.face_utils import FaceAligner
# Initialize dlib face detector and landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Initialize face aligner
fa = FaceAligner(predictor, desiredFaceWidth=256)
# Load image
image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = detector(gray, 1)
for face in faces:
# Align face
aligned_face = fa.align(image, gray, face)
cv2.imshow("Aligned Face", aligned_face)
cv2.waitKey(0)
cv2.destroyAllWindows()

Functions for converting between dlib and OpenCV coordinate formats.
def rect_to_bb(rect):
"""
Convert dlib rectangle to OpenCV bounding box format.
Args:
rect: dlib rectangle object
Returns:
tuple: (x, y, w, h) bounding box coordinates in OpenCV format
"""
def shape_to_np(shape, dtype="int"):
"""
Convert dlib shape object to numpy array of coordinates.
Args:
shape: dlib shape object containing facial landmarks
dtype (str): NumPy data type (default: "int")
Returns:
np.ndarray: Array of (x, y) coordinates with shape (num_points, 2)
"""Usage Example:
import cv2
import dlib
from imutils.face_utils import rect_to_bb, shape_to_np
# Initialize detector and predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = detector(gray, 1)
for face in faces:
# Convert dlib rectangle to OpenCV bounding box
(x, y, w, h) = rect_to_bb(face)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Get facial landmarks
shape = predictor(gray, face)
landmarks = shape_to_np(shape)
# Draw landmarks
for (x, y) in landmarks:
cv2.circle(image, (x, y), 2, (0, 0, 255), -1)
cv2.imshow("Face Detection", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

Visualization utilities for drawing facial landmark regions on images.
def visualize_facial_landmarks(image, shape, colors=None, alpha=0.75):
"""
Draw facial landmark regions on image.
Args:
image (np.ndarray): Input image (OpenCV format)
shape (np.ndarray): Array of facial landmark coordinates
colors (list, optional): List of BGR color tuples for each region
alpha (float): Transparency level for overlay (default: 0.75)
Returns:
np.ndarray: Output image with facial landmarks visualized
Note:
Automatically determines landmark format (68-point or 5-point).
Uses predefined colors if colors parameter is not provided.
Draws polygons for regions like jaw, eyebrows, eyes, nose, mouth.
"""Usage Example:
import cv2
import dlib
from imutils.face_utils import shape_to_np, visualize_facial_landmarks
# Initialize detector and predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = detector(gray, 1)
for face in faces:
# Get facial landmarks
shape = predictor(gray, face)
landmarks = shape_to_np(shape)
# Visualize landmarks
output = visualize_facial_landmarks(image, landmarks, alpha=0.6)
cv2.imshow("Facial Landmarks", output)
cv2.waitKey(0)
cv2.destroyAllWindows()

Pre-defined mappings for facial landmark regions to array indices.
# For 68-point facial landmark detector
FACIAL_LANDMARKS_68_IDXS = {
"mouth": (48, 68),
"inner_mouth": (60, 68),
"right_eyebrow": (17, 22),
"left_eyebrow": (22, 27),
"right_eye": (36, 42),
"left_eye": (42, 48),
"nose": (27, 36),
"jaw": (0, 17)
}
# For 5-point facial landmark detector
FACIAL_LANDMARKS_5_IDXS = {
"right_eye": (2, 4),
"left_eye": (0, 2),
"nose": (4, 5)
}
# Legacy alias
FACIAL_LANDMARKS_IDXS = FACIAL_LANDMARKS_68_IDXS

Usage Example:
import cv2
import dlib
from imutils.face_utils import (shape_to_np, FACIAL_LANDMARKS_68_IDXS,
visualize_facial_landmarks)
# Initialize detector and predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
image = cv2.imread("face.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = detector(gray, 1)
for face in faces:
shape = predictor(gray, face)
landmarks = shape_to_np(shape)
# Extract specific facial regions
(j, k) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
left_eye = landmarks[j:k]
(j, k) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
right_eye = landmarks[j:k]
(j, k) = FACIAL_LANDMARKS_68_IDXS["mouth"]
mouth = landmarks[j:k]
# Draw specific regions
for (x, y) in left_eye:
cv2.circle(image, (x, y), 2, (0, 255, 0), -1) # Green for left eye
for (x, y) in right_eye:
cv2.circle(image, (x, y), 2, (255, 0, 0), -1) # Blue for right eye
for (x, y) in mouth:
cv2.circle(image, (x, y), 2, (0, 0, 255), -1) # Red for mouth
cv2.imshow("Facial Regions", image)
cv2.waitKey(0)
cv2.destroyAllWindows()

Here's a comprehensive example combining face detection, alignment, and landmark visualization:
import cv2
import dlib
import numpy as np
from imutils.face_utils import (FaceAligner, rect_to_bb, shape_to_np,
visualize_facial_landmarks, FACIAL_LANDMARKS_68_IDXS)
def process_faces_in_image(image_path, predictor_path):
# Initialize dlib components
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
fa = FaceAligner(predictor, desiredFaceWidth=256)
# Load image
image = cv2.imread(image_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
original = image.copy()
# Detect faces
faces = detector(gray, 1)
print(f"Found {len(faces)} faces")
aligned_faces = []
for (i, face) in enumerate(faces):
# Convert dlib rectangle to bounding box
(x, y, w, h) = rect_to_bb(face)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(image, f"Face {i+1}", (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
# Get facial landmarks
shape = predictor(gray, face)
landmarks = shape_to_np(shape)
# Visualize landmarks
landmark_image = visualize_facial_landmarks(original.copy(), landmarks)
# Align face
aligned_face = fa.align(original, gray, face)
aligned_faces.append(aligned_face)
# Show results for this face
cv2.imshow(f"Face {i+1} - Landmarks", landmark_image)
cv2.imshow(f"Face {i+1} - Aligned", aligned_face)
# Show original with bounding boxes
cv2.imshow("Face Detection", image)
# Create montage of aligned faces if multiple faces found
if len(aligned_faces) > 1:
# Resize all faces to same size
aligned_resized = [cv2.resize(face, (256, 256)) for face in aligned_faces]
# Create horizontal montage
montage = np.hstack(aligned_resized)
cv2.imshow("Aligned Faces Montage", montage)
cv2.waitKey(0)
cv2.destroyAllWindows()
return aligned_faces
# Usage
if __name__ == "__main__":
aligned_faces = process_faces_in_image(
"group_photo.jpg",
"shape_predictor_68_face_landmarks.dat"
)
print(f"Processed and aligned {len(aligned_faces)} faces")Example of custom face processing with eye aspect ratio calculation:
import cv2
import dlib
import numpy as np
from imutils.face_utils import shape_to_np, FACIAL_LANDMARKS_68_IDXS
def eye_aspect_ratio(eye):
"""Calculate eye aspect ratio for blink detection."""
# Vertical eye landmarks
A = np.linalg.norm(eye[1] - eye[5])
B = np.linalg.norm(eye[2] - eye[4])
# Horizontal eye landmark
C = np.linalg.norm(eye[0] - eye[3])
# Eye aspect ratio
ear = (A + B) / (2.0 * C)
return ear
def detect_blinks(image_path, predictor_path):
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
# Eye aspect ratio threshold
EAR_THRESHOLD = 0.3
image = cv2.imread(image_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = detector(gray, 1)
for face in faces:
landmarks = shape_to_np(predictor(gray, face))
# Extract eye coordinates
(lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
(rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
leftEye = landmarks[lStart:lEnd]
rightEye = landmarks[rStart:rEnd]
# Calculate eye aspect ratios
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
ear = (leftEAR + rightEAR) / 2.0
# Check for blink
if ear < EAR_THRESHOLD:
cv2.putText(image, "BLINK DETECTED", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
# Draw eye contours
cv2.drawContours(image, [cv2.convexHull(leftEye)], -1, (0, 255, 0), 1)
cv2.drawContours(image, [cv2.convexHull(rightEye)], -1, (0, 255, 0), 1)
# Display EAR value
cv2.putText(image, f"EAR: {ear:.2f}", (300, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
cv2.imshow("Blink Detection", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Usage
detect_blinks("face.jpg", "shape_predictor_68_face_landmarks.dat")Install with Tessl CLI
npx tessl i tessl/pypi-imutils

evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9
scenario-10