Pretrained PoseNet model in TensorFlow.js for real-time human pose estimation from images and video streams
—
Utility functions for manipulating, scaling, and analyzing detected poses. Includes keypoint relationships, geometric calculations, and coordinate transformations.
Get connected keypoint pairs that meet a minimum confidence threshold, useful for drawing pose skeleton.
/**
* Get pairs of connected keypoints above confidence threshold
* @param keypoints - Array of keypoints from a detected pose
* @param minConfidence - Minimum confidence score for both keypoints in pair
* @returns Array of keypoint pairs representing pose skeleton connections
*/
function getAdjacentKeyPoints(
keypoints: Keypoint[],
minConfidence: number
): Keypoint[][];

Usage Examples:
import { getAdjacentKeyPoints } from '@tensorflow-models/posenet';
// Get high-confidence skeleton connections
const pose = await net.estimateSinglePose(imageElement);
const adjacentKeyPoints = getAdjacentKeyPoints(pose.keypoints, 0.7);
// Draw skeleton lines
adjacentKeyPoints.forEach(([pointA, pointB]) => {
drawLine(
pointA.position.x, pointA.position.y,
pointB.position.x, pointB.position.y
);
});
// Count visible connections
const visibleConnections = getAdjacentKeyPoints(pose.keypoints, 0.5);
console.log(`Pose has ${visibleConnections.length} visible connections`);
// Filter by specific body parts
const armConnections = adjacentKeyPoints.filter(([pointA, pointB]) => {
const armParts = ['leftShoulder', 'leftElbow', 'leftWrist', 'rightShoulder', 'rightElbow', 'rightWrist'];
return armParts.includes(pointA.part) && armParts.includes(pointB.part);
});

Calculate the bounding box around all keypoints in a pose.
/**
* Calculate bounding box coordinates around pose keypoints
* @param keypoints - Array of keypoints from detected pose
* @returns Bounding box with min/max x/y coordinates
*/
function getBoundingBox(keypoints: Keypoint[]): {
maxX: number;
maxY: number;
minX: number;
minY: number;
};

Usage Examples:
import { getBoundingBox } from '@tensorflow-models/posenet';
const pose = await net.estimateSinglePose(imageElement);
const boundingBox = getBoundingBox(pose.keypoints);
// Calculate pose dimensions
const width = boundingBox.maxX - boundingBox.minX;
const height = boundingBox.maxY - boundingBox.minY;
const area = width * height;
console.log(`Pose bounding box: ${width}x${height} pixels, area: ${area}`);
// Draw bounding box rectangle
drawRectangle(
boundingBox.minX, boundingBox.minY,
width, height
);
// Check if pose fits in target area
const targetWidth = 300;
const targetHeight = 400;
const fitsInTarget = width <= targetWidth && height <= targetHeight;
// Center coordinates
const centerX = (boundingBox.minX + boundingBox.maxX) / 2;
const centerY = (boundingBox.minY + boundingBox.maxY) / 2;

Get the four corner points of the pose bounding box.
/**
* Get corner points of pose bounding box
* @param keypoints - Array of keypoints from detected pose
* @returns Array of four corner points [top-left, top-right, bottom-right, bottom-left]
*/
function getBoundingBoxPoints(keypoints: Keypoint[]): Vector2D[];

Usage Examples:
import { getBoundingBoxPoints } from '@tensorflow-models/posenet';
const pose = await net.estimateSinglePose(imageElement);
const corners = getBoundingBoxPoints(pose.keypoints);
// Draw bounding box outline
for (let i = 0; i < corners.length; i++) {
const current = corners[i];
const next = corners[(i + 1) % corners.length];
drawLine(current.x, current.y, next.x, next.y);
}
// Check if point is inside bounding box
function isPointInBoundingBox(point: Vector2D, corners: Vector2D[]): boolean {
const [topLeft, topRight, bottomRight, bottomLeft] = corners;
return point.x >= topLeft.x && point.x <= topRight.x &&
point.y >= topLeft.y && point.y <= bottomLeft.y;
}
// Create polygon from corners for collision detection
const boundingPolygon = corners.map(corner => [corner.x, corner.y]);

Scale pose coordinates with optional offset transformation.
/**
* Scale pose keypoint coordinates with optional offset
* @param pose - Input pose to scale
* @param scaleY - Vertical scaling factor
* @param scaleX - Horizontal scaling factor
* @param offsetY - Vertical offset to add (default: 0)
* @param offsetX - Horizontal offset to add (default: 0)
* @returns New pose with scaled coordinates
*/
function scalePose(
pose: Pose,
scaleY: number,
scaleX: number,
offsetY?: number,
offsetX?: number
): Pose;

Usage Examples:
import { scalePose } from '@tensorflow-models/posenet';
const originalPose = await net.estimateSinglePose(imageElement);
// Scale pose to fit different image size
const originalSize = { width: 640, height: 480 };
const targetSize = { width: 320, height: 240 };
const scaleX = targetSize.width / originalSize.width;
const scaleY = targetSize.height / originalSize.height;
const scaledPose = scalePose(originalPose, scaleY, scaleX);
// Scale and center pose in new coordinate system
const canvasWidth = 800;
const canvasHeight = 600;
const centerX = canvasWidth / 2;
const centerY = canvasHeight / 2;
const centeredPose = scalePose(
originalPose,
0.5, // Scale down by half
0.5, // Scale down by half
centerY, // Center vertically
centerX // Center horizontally
);
// Convert from model coordinates to display coordinates
const displayPose = scalePose(
originalPose,
displayHeight / modelHeight,
displayWidth / modelWidth,
displayOffsetY,
displayOffsetX
);

Scale and optionally flip multiple poses with padding compensation.
/**
* Scale and optionally flip poses to original image coordinates
* @param poses - Array of poses to transform
* @param dimensions - Original image dimensions [height, width]
* @param inputResolution - Model input resolution [height, width]
* @param padding - Padding applied during preprocessing
* @param flipHorizontal - Whether to flip poses horizontally
* @returns Array of transformed poses
*/
function scaleAndFlipPoses(
poses: Pose[],
dimensions: [number, number],
inputResolution: [number, number],
padding: Padding,
flipHorizontal: boolean
): Pose[];
interface Padding {
top: number;
bottom: number;
left: number;
right: number;
}

Usage Examples:
import { scaleAndFlipPoses } from '@tensorflow-models/posenet';
// This function is typically used internally by PoseNet
// but can be useful for custom processing pipelines
const poses = await net.estimateMultiplePoses(imageElement);
const originalDimensions: [number, number] = [imageElement.height, imageElement.width];
const modelInputResolution: [number, number] = [257, 257];
const paddingInfo = { top: 10, bottom: 10, left: 5, right: 5 };
// Transform poses back to original image coordinates
const transformedPoses = scaleAndFlipPoses(
poses,
originalDimensions,
modelInputResolution,
paddingInfo,
false // Don't flip
);
// For webcam processing with horizontal flip
const webcamPoses = scaleAndFlipPoses(
poses,
[video.videoHeight, video.videoWidth],
[257, 257],
padding,
true // Flip for webcam
);

Get the current package version.
/**
* PoseNet package version string
*/
const version: string;

Usage Example:
import { version } from '@tensorflow-models/posenet';
console.log('PoseNet version:', version);
// Version checking for compatibility
const requiredVersion = '2.2.0';
const isCompatible = compareVersions(version, requiredVersion) >= 0;

Type Definitions:

interface Vector2D {
x: number;
y: number;
}
interface Keypoint {
score: number;
position: Vector2D;
part: string;
}
interface Pose {
keypoints: Keypoint[];
score: number;
}
interface Padding {
top: number;
bottom: number;
left: number;
right: number;
}

Install with Tessl CLI
npx tessl i tessl/npm-tensorflow-models--posenet