A comprehensive 2D and 3D face analysis toolkit with state-of-the-art algorithms for face recognition, detection, and alignment.

High-level interface for complete face analysis that orchestrates multiple specialized models to provide unified face detection, recognition, landmark detection, and attribute prediction capabilities.

The primary interface for face analysis tasks: it automatically loads and manages multiple models based on the specified model pack.
class FaceAnalysis:
    def __init__(self, name='buffalo_l', root='~/.insightface', allowed_modules=None, **kwargs):
        """
        Initialize face analysis pipeline.

        Parameters:
        - name: str, model pack name (default: 'buffalo_l')
        - root: str, model storage directory (default: '~/.insightface')
        - allowed_modules: list, restrict to specific model types
          (e.g., ['detection', 'recognition'])
        - **kwargs: additional arguments passed to model initialization
        """

    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
        """
        Prepare models for inference.

        Parameters:
        - ctx_id: int, context ID for device selection (a negative value,
          e.g. -1, selects CPU; 0 or higher selects the corresponding GPU)
        - det_thresh: float, detection confidence threshold (0.0-1.0)
        - det_size: tuple, detection input size (width, height)
        """

    def get(self, img, max_num=0) -> "List[Face]":
        """
        Detect and analyze faces in image.

        Parameters:
        - img: np.ndarray, input image in BGR format
        - max_num: int, maximum number of faces to detect (0 for unlimited)

        Returns:
        List of Face objects with analysis results
        """

    def draw_on(self, img, faces) -> "np.ndarray":
        """
        Draw face detection results on image.

        Parameters:
        - img: np.ndarray, input image
        - faces: list, Face objects to draw

        Returns:
        np.ndarray: image with drawn bounding boxes and landmarks
        """

# Container for face analysis results that accumulates data from multiple models.
class Face(dict):
    def __init__(self, d=None, **kwargs):
        """
        Initialize Face object.

        Parameters:
        - d: dict, initial data dictionary
        - **kwargs: additional face attributes
        """

    @property
    def embedding_norm(self) -> float:
        """L2 norm of the face embedding vector."""

    @property
    def normed_embedding(self) -> "np.ndarray":
        """Normalized face embedding (unit vector)."""

    @property
    def sex(self) -> str:
        """Gender as string: 'M' for male, 'F' for female."""

# The Face object dynamically accumulates attributes from different analysis models:
# Detection model attributes
bbox: np.ndarray # Bounding box coordinates [x1, y1, x2, y2]
det_score: float # Detection confidence score (0.0-1.0)
kps: np.ndarray # Facial keypoints, shape (5, 2) for 5-point landmarks
# Recognition model attributes
embedding: np.ndarray # Face embedding vector, typically shape (512,)
# Attribute prediction model attributes
gender: int # Gender prediction: 0=female, 1=male
age: int # Age prediction in years
# Landmark detection model attributes
landmark_2d_68: np.ndarray # 68 2D facial landmarks, shape (68, 2)
landmark_3d_68: np.ndarray # 68 3D facial landmarks, shape (68, 3)
landmark_2d_106: np.ndarray # 106 2D facial landmarks, shape (106, 2)
# Pose estimation attributes
pose: np.ndarray           # Head pose angles [pitch, yaw, roll] in degrees

import cv2
from insightface.app import FaceAnalysis

# Initialize with default model pack
app = FaceAnalysis()
app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))

# Load and analyze image
img = cv2.imread('family_photo.jpg')
faces = app.get(img)

print(f"Found {len(faces)} faces")
for i, face in enumerate(faces):
    print(f"Face {i+1}:")
    print(f" Age: {face.age}")
    print(f" Gender: {face.sex}")
    print(f" Confidence: {face.det_score:.3f}")
    print(f" Location: {face.bbox}")

# Use GPU for inference (requires CUDA-enabled onnxruntime-gpu)
app = FaceAnalysis()
app.prepare(ctx_id=0)  # Use first GPU, -1 for CPU

# Process with GPU acceleration
faces = app.get(img)

# Load only detection and recognition models
app = FaceAnalysis(allowed_modules=['detection', 'recognition'])
app.prepare(ctx_id=0)
faces = app.get(img)
# faces will have bbox, det_score, and embedding but no age/gender

import os
from pathlib import Path

app = FaceAnalysis()
app.prepare(ctx_id=0)

# Process directory of images
image_dir = Path('photos/')
# Ensure the output directory exists; cv2.imwrite fails silently otherwise.
os.makedirs('results', exist_ok=True)
for img_path in image_dir.glob('*.jpg'):
    img = cv2.imread(str(img_path))
    faces = app.get(img)
    if faces:
        # Save visualization
        result_img = app.draw_on(img, faces)
        output_path = f'results/{img_path.stem}_faces.jpg'
        cv2.imwrite(output_path, result_img)

    # Extract embeddings for face recognition
    embeddings = [face.embedding for face in faces]
    print(f"{img_path.name}: {len(faces)} faces, {len(embeddings)} embeddings")

# Compare faces between two images
img1 = cv2.imread('person1.jpg')
img2 = cv2.imread('person2.jpg')
faces1 = app.get(img1)
faces2 = app.get(img2)
if faces1 and faces2:
face1 = faces1[0] # First face in image 1
face2 = faces2[0] # First face in image 2
# Compute cosine similarity
embedding1 = face1.normed_embedding
embedding2 = face2.normed_embedding
similarity = np.dot(embedding1, embedding2)
print(f"Face similarity: {similarity:.3f}")
if similarity > 0.6:
print("Likely same person")
else:
print("Likely different people")Install with Tessl CLI
npx tessl i tessl/pypi-insightface