Real-time person and body-part segmentation in web browsers, powered by TensorFlow.js machine-learning models.
—
Advanced body part segmentation providing pixel-level classification of 24 distinct human body parts. Enables fine-grained analysis and effects targeting specific anatomical regions like faces, hands, torso, and limbs.
Segments all body parts for all people in the image into a single combined mask.
/**
 * Segments body parts for all people in the image into a single combined mask.
 * @param input - Image input (ImageData, HTMLImageElement, HTMLCanvasElement, HTMLVideoElement, OffscreenCanvas, tf.Tensor3D)
 * @param config - Optional inference configuration
 * @returns Promise resolving to a semantic part segmentation whose `data`
 *   holds one part ID (0-23) per pixel, or -1 for background pixels
 */
segmentPersonParts(
input: BodyPixInput,
config?: PersonInferenceConfig
): Promise<SemanticPartSegmentation>;
interface SemanticPartSegmentation {
  /** Part IDs (0-23) for each pixel, -1 for background */
  data: Int32Array;
  /** Mask width in pixels */
  width: number;
  /** Mask height in pixels */
  height: number;
  /** Array of all detected poses */
  allPoses: Pose[];
}

Segments body parts for multiple people individually, providing separate part masks for each detected person.
/**
 * Segments body parts for multiple people individually, returning a separate
 * part mask (with its associated pose) for each detected person.
 * @param input - Image input
 * @param config - Optional multi-person inference configuration
 * @returns Promise resolving to an array of per-person part segmentations
 */
segmentMultiPersonParts(
input: BodyPixInput,
config?: MultiPersonInstanceInferenceConfig
): Promise<PartSegmentation[]>;
interface PartSegmentation {
  /** Part IDs (0-23) for each pixel, -1 for background */
  data: Int32Array;
  /** Mask width in pixels */
  width: number;
  /** Mask height in pixels */
  height: number;
  /** Pose keypoints for this person */
  pose: Pose;
}

Usage Examples:
import * as bodyPix from '@tensorflow-models/body-pix';

const net = await bodyPix.load();
// getElementById returns HTMLElement | null; cast to the concrete image type
// expected by BodyPixInput.
const imageElement = document.getElementById('person-image') as HTMLImageElement;

// Semantic part segmentation for all people
const partSegmentation = await net.segmentPersonParts(imageElement);

// Count pixels for each body part (IDs 0-23; -1 background pixels are skipped)
const partCounts = new Array(24).fill(0);
for (let i = 0; i < partSegmentation.data.length; i++) {
  const partId = partSegmentation.data[i];
  if (partId >= 0 && partId < 24) {
    partCounts[partId]++;
  }
}

// Multi-person part segmentation
const peoplePartSegmentations = await net.segmentMultiPersonParts(imageElement, {
  maxDetections: 3,
  scoreThreshold: 0.5
});
console.log(`Detected ${peoplePartSegmentations.length} people with body parts`);

BodyPix identifies 24 distinct body parts using the following ID mapping:
const PART_CHANNELS: string[] = [
'left_face', // 0
'right_face', // 1
'left_upper_arm_front', // 2
'left_upper_arm_back', // 3
'right_upper_arm_front', // 4
'right_upper_arm_back', // 5
'left_lower_arm_front', // 6
'left_lower_arm_back', // 7
'right_lower_arm_front', // 8
'right_lower_arm_back', // 9
'left_hand', // 10
'right_hand', // 11
'torso_front', // 12
'torso_back', // 13
'left_upper_leg_front', // 14
'left_upper_leg_back', // 15
'right_upper_leg_front', // 16
'right_upper_leg_back', // 17
'left_lower_leg_front', // 18
'left_lower_leg_back', // 19
'right_lower_leg_front', // 20
'right_lower_leg_back', // 21
'left_feet', // 22
'right_feet' // 23
];Face Parts: 0-1 (left_face, right_face)
Hand Parts: 10-11 (left_hand, right_hand)
Arm Parts: 2-9 (upper/lower arms, front/back)
Torso Parts: 12-13 (torso_front, torso_back)
Leg Parts: 14-21 (upper/lower legs, front/back)
Feet Parts: 22-23 (left_feet, right_feet)
import { PART_CHANNELS } from '@tensorflow-models/body-pix';

// Blur faces for privacy (parts 0 and 1)
const FACE_PARTS = [0, 1];
const partSegmentation = await net.segmentPersonParts(imageElement);

// Check if faces are detected
const hasFaces = partSegmentation.data.some(partId => FACE_PARTS.includes(partId));
if (hasFaces) {
  // Apply face blur effect
  bodyPix.blurBodyPart(canvas, imageElement, partSegmentation, FACE_PARTS, 15);
}

// Create hand-only mask
const HAND_PARTS = [10, 11]; // left_hand, right_hand
const handMask = bodyPix.toMask(partSegmentation,
  { r: 255, g: 255, b: 255, a: 255 }, // white hands
  { r: 0, g: 0, b: 0, a: 0 },         // transparent background
  false,
  HAND_PARTS
);

/**
 * Computes per-part pixel coverage for a semantic part segmentation and
 * returns an entry for every part that is visible in the mask.
 */
function analyzeBodyParts(partSegmentation: SemanticPartSegmentation) {
  const { data, width, height } = partSegmentation;
  const totalPixels = width * height;
  const partStats = new Array(24).fill(0);

  // Count pixels for each part (-1 background pixels are skipped)
  for (let i = 0; i < data.length; i++) {
    const partId = data[i];
    if (partId >= 0 && partId < 24) {
      partStats[partId]++;
    }
  }

  // Calculate coverage percentages
  const partCoverage = partStats.map((count, partId) => ({
    partName: PART_CHANNELS[partId],
    pixelCount: count,
    coveragePercent: (count / totalPixels) * 100
  }));

  return partCoverage.filter(part => part.pixelCount > 0);
}

const partAnalysis = analyzeBodyParts(partSegmentation);
console.log('Visible body parts:', partAnalysis);

// Create colored visualization of all parts
const coloredPartMask = bodyPix.toColoredPartMask(partSegmentation);

// Create mask for upper body only (torso + arms + hands + face)
const UPPER_BODY_PARTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13];
const upperBodyMask = bodyPix.toMask(partSegmentation,
  { r: 0, g: 255, b: 0, a: 255 }, // green foreground
  { r: 0, g: 0, b: 0, a: 0 },     // transparent background
  false,
  UPPER_BODY_PARTS
);

Install with Tessl CLI
npx tessl i tessl/npm-tensorflow-models--body-pix