# Pose Processing and Utilities

Utility functions for manipulating, scaling, and analyzing detected poses. Includes keypoint relationships, geometric calculations, and coordinate transformations.

## Capabilities

### Adjacent Keypoints

Get connected keypoint pairs that meet a minimum confidence threshold, useful for drawing the pose skeleton.

```typescript { .api }
/**
 * Get pairs of connected keypoints above confidence threshold
 * @param keypoints - Array of keypoints from a detected pose
 * @param minConfidence - Minimum confidence score for both keypoints in pair
 * @returns Array of keypoint pairs representing pose skeleton connections
 */
function getAdjacentKeyPoints(
  keypoints: Keypoint[],
  minConfidence: number
): Keypoint[][];
```

**Usage Examples:**

```typescript
import { getAdjacentKeyPoints } from '@tensorflow-models/posenet';

// Get high-confidence skeleton connections
const pose = await net.estimateSinglePose(imageElement);
const adjacentKeyPoints = getAdjacentKeyPoints(pose.keypoints, 0.7);

// Draw skeleton lines
adjacentKeyPoints.forEach(([pointA, pointB]) => {
  drawLine(
    pointA.position.x, pointA.position.y,
    pointB.position.x, pointB.position.y
  );
});

// Count visible connections
const visibleConnections = getAdjacentKeyPoints(pose.keypoints, 0.5);
console.log(`Pose has ${visibleConnections.length} visible connections`);

// Filter by specific body parts
const armConnections = adjacentKeyPoints.filter(([pointA, pointB]) => {
  const armParts = ['leftShoulder', 'leftElbow', 'leftWrist', 'rightShoulder', 'rightElbow', 'rightWrist'];
  return armParts.includes(pointA.part) && armParts.includes(pointB.part);
});
```

### Bounding Box Calculation

Calculate the bounding box around all keypoints in a pose.

```typescript { .api }
/**
 * Calculate bounding box coordinates around pose keypoints
 * @param keypoints - Array of keypoints from detected pose
 * @returns Bounding box with min/max x/y coordinates
 */
function getBoundingBox(keypoints: Keypoint[]): {
  maxX: number;
  maxY: number;
  minX: number;
  minY: number;
};
```

**Usage Examples:**

```typescript
import { getBoundingBox } from '@tensorflow-models/posenet';

const pose = await net.estimateSinglePose(imageElement);
const boundingBox = getBoundingBox(pose.keypoints);

// Calculate pose dimensions
const width = boundingBox.maxX - boundingBox.minX;
const height = boundingBox.maxY - boundingBox.minY;
const area = width * height;

console.log(`Pose bounding box: ${width}x${height} pixels, area: ${area}`);

// Draw bounding box rectangle
drawRectangle(
  boundingBox.minX, boundingBox.minY,
  width, height
);

// Check if pose fits in target area
const targetWidth = 300;
const targetHeight = 400;
const fitsInTarget = width <= targetWidth && height <= targetHeight;

// Center coordinates
const centerX = (boundingBox.minX + boundingBox.maxX) / 2;
const centerY = (boundingBox.minY + boundingBox.maxY) / 2;
```

### Bounding Box Points

Get the four corner points of the pose bounding box.

```typescript { .api }
/**
 * Get corner points of pose bounding box
 * @param keypoints - Array of keypoints from detected pose
 * @returns Array of four corner points [top-left, top-right, bottom-right, bottom-left]
 */
function getBoundingBoxPoints(keypoints: Keypoint[]): Vector2D[];
```

**Usage Examples:**

```typescript
import { getBoundingBoxPoints } from '@tensorflow-models/posenet';

const pose = await net.estimateSinglePose(imageElement);
const corners = getBoundingBoxPoints(pose.keypoints);

// Draw bounding box outline
for (let i = 0; i < corners.length; i++) {
  const current = corners[i];
  const next = corners[(i + 1) % corners.length];
  drawLine(current.x, current.y, next.x, next.y);
}

// Check if point is inside the axis-aligned bounding box
function isPointInBoundingBox(point: Vector2D, corners: Vector2D[]): boolean {
  const [topLeft, topRight, bottomRight, bottomLeft] = corners;
  return point.x >= topLeft.x && point.x <= topRight.x &&
         point.y >= topLeft.y && point.y <= bottomLeft.y;
}

// Create polygon from corners for collision detection
const boundingPolygon = corners.map(corner => [corner.x, corner.y]);
```

### Pose Scaling

Scale pose coordinates with optional offset transformation.

```typescript { .api }
/**
 * Scale pose keypoint coordinates with optional offset
 * @param pose - Input pose to scale
 * @param scaleY - Vertical scaling factor
 * @param scaleX - Horizontal scaling factor
 * @param offsetY - Vertical offset to add (default: 0)
 * @param offsetX - Horizontal offset to add (default: 0)
 * @returns New pose with scaled coordinates
 */
function scalePose(
  pose: Pose,
  scaleY: number,
  scaleX: number,
  offsetY?: number,
  offsetX?: number
): Pose;
```

**Usage Examples:**

```typescript
import { scalePose } from '@tensorflow-models/posenet';

const originalPose = await net.estimateSinglePose(imageElement);

// Scale pose to fit different image size
const originalSize = { width: 640, height: 480 };
const targetSize = { width: 320, height: 240 };
const scaleX = targetSize.width / originalSize.width;
const scaleY = targetSize.height / originalSize.height;

const scaledPose = scalePose(originalPose, scaleY, scaleX);

// Scale the pose and shift it toward the canvas center
const canvasWidth = 800;
const canvasHeight = 600;
const centerX = canvasWidth / 2;
const centerY = canvasHeight / 2;

const centeredPose = scalePose(
  originalPose,
  0.5, // Scale down by half
  0.5, // Scale down by half
  centerY, // Offset vertically by half the canvas height
  centerX // Offset horizontally by half the canvas width
);

// Convert from model coordinates to display coordinates
const displayPose = scalePose(
  originalPose,
  displayHeight / modelHeight,
  displayWidth / modelWidth,
  displayOffsetY,
  displayOffsetX
);
```

### Multi-Pose Scaling and Flipping

Scale and optionally flip multiple poses with padding compensation.

```typescript { .api }
/**
 * Scale and optionally flip poses to original image coordinates
 * @param poses - Array of poses to transform
 * @param dimensions - Original image dimensions [height, width]
 * @param inputResolution - Model input resolution [height, width]
 * @param padding - Padding applied during preprocessing
 * @param flipHorizontal - Whether to flip poses horizontally
 * @returns Array of transformed poses
 */
function scaleAndFlipPoses(
  poses: Pose[],
  dimensions: [number, number],
  inputResolution: [number, number],
  padding: Padding,
  flipHorizontal: boolean
): Pose[];

interface Padding {
  top: number;
  bottom: number;
  left: number;
  right: number;
}
```

**Usage Examples:**

```typescript
import { scaleAndFlipPoses } from '@tensorflow-models/posenet';

// This function is typically used internally by PoseNet
// but can be useful for custom processing pipelines

const poses = await net.estimateMultiplePoses(imageElement);
const originalDimensions: [number, number] = [imageElement.height, imageElement.width];
const modelInputResolution: [number, number] = [257, 257];
const paddingInfo = { top: 10, bottom: 10, left: 5, right: 5 };

// Transform poses back to original image coordinates
const transformedPoses = scaleAndFlipPoses(
  poses,
  originalDimensions,
  modelInputResolution,
  paddingInfo,
  false // Don't flip
);

// For webcam processing with horizontal flip
const webcamPoses = scaleAndFlipPoses(
  poses,
  [video.videoHeight, video.videoWidth],
  [257, 257],
  padding,
  true // Flip for webcam
);
```

### Version Information

Get the current package version.

```typescript { .api }
/**
 * PoseNet package version string
 */
const version: string;
```

**Usage Example:**

```typescript
import { version } from '@tensorflow-models/posenet';

console.log('PoseNet version:', version);

// Version checking for compatibility
const requiredVersion = '2.2.0';
const isCompatible = compareVersions(version, requiredVersion) >= 0;
```

## Supporting Types

```typescript { .api }
interface Vector2D {
  x: number;
  y: number;
}

interface Keypoint {
  score: number;
  position: Vector2D;
  part: string;
}

interface Pose {
  keypoints: Keypoint[];
  score: number;
}

interface Padding {
  top: number;
  bottom: number;
  left: number;
  right: number;
}
```