or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

callbacks.md · image-processing.md · index.md · io.md · savedmodel.md · tensorboard.md

docs/savedmodel.md

# SavedModel Support

TensorFlow.js Node provides comprehensive support for loading and running TensorFlow SavedModels, enabling seamless integration with models trained in Python TensorFlow. This allows you to deploy Python-trained models in Node.js environments for high-performance inference.

## Capabilities

### Loading SavedModels

#### Load SavedModel

Load a TensorFlow SavedModel from the file system for inference.

```typescript { .api }
/**
 * Load a TensorFlow SavedModel for inference
 * @param path - Path to the SavedModel directory
 * @param tags - Model tags to load (default: ['serve'])
 * @param signature - Signature to use for inference (default: 'serving_default')
 * @returns Promise resolving to TFSavedModel instance
 */
function loadSavedModel(
  path: string,
  tags?: string[],
  signature?: string
): Promise<TFSavedModel>;
```

**Usage Example:**

```typescript
import * as tf from '@tensorflow/tfjs-node';

// Load a SavedModel
const model = await tf.node.loadSavedModel('./path/to/saved_model');

console.log('Model inputs:', model.inputs);
console.log('Model outputs:', model.outputs);

// Run inference
const inputTensor = tf.tensor2d([[1.0, 2.0, 3.0, 4.0]]);
const prediction = model.predict(inputTensor) as Tensor;

console.log('Prediction:');
prediction.print();

// Clean up
inputTensor.dispose();
prediction.dispose();
model.dispose();
```

#### Load with Specific Tags and Signature

```typescript
// Load model with specific serving tags
const model = await tf.node.loadSavedModel(
  './my_model',
  ['serve', 'gpu'], // Use GPU-optimized version if available
  'predict' // Use 'predict' signature instead of default
);
```

### SavedModel Inspection

#### Get MetaGraphs Information

Inspect available MetaGraphs, tags, and signatures in a SavedModel without loading it.

```typescript { .api }
/**
 * Get metadata about a SavedModel's available configurations
 * @param path - Path to the SavedModel directory
 * @returns Promise resolving to array of MetaGraph information
 */
function getMetaGraphsFromSavedModel(path: string): Promise<MetaGraph[]>;
```

**Usage Example:**

```typescript
// Inspect SavedModel before loading
const metaGraphs = await tf.node.getMetaGraphsFromSavedModel('./my_model');

console.log('Available MetaGraphs:');
metaGraphs.forEach((metaGraph, index) => {
  console.log(`MetaGraph ${index}:`);
  console.log(' Tags:', metaGraph.tags);
  console.log(' Signatures:', Object.keys(metaGraph.signatureDef));

  // Show signature details
  Object.entries(metaGraph.signatureDef).forEach(([sigName, sigDef]) => {
    console.log(` Signature "${sigName}":`);
    console.log(' Inputs:', Object.keys(sigDef.inputs));
    console.log(' Outputs:', Object.keys(sigDef.outputs));
  });
});

// Load model with discovered tags and signature
if (metaGraphs.length > 0) {
  const firstMetaGraph = metaGraphs[0];
  const availableTags = firstMetaGraph.tags;
  const availableSignatures = Object.keys(firstMetaGraph.signatureDef);

  const model = await tf.node.loadSavedModel(
    './my_model',
    availableTags,
    availableSignatures[0]
  );
}
```

### Model Management

#### Get Number of Loaded Models

Track the number of SavedModels currently loaded in memory.

```typescript { .api }
/**
 * Get the number of currently loaded SavedModels
 * @returns Number of loaded SavedModel instances
 */
function getNumOfSavedModels(): number;
```

**Usage Example:**

```typescript
console.log('Initially loaded models:', tf.node.getNumOfSavedModels()); // 0

const model1 = await tf.node.loadSavedModel('./model1');
console.log('After loading model1:', tf.node.getNumOfSavedModels()); // 1

const model2 = await tf.node.loadSavedModel('./model2');
console.log('After loading model2:', tf.node.getNumOfSavedModels()); // 2

model1.dispose();
console.log('After disposing model1:', tf.node.getNumOfSavedModels()); // 1

model2.dispose();
console.log('After disposing model2:', tf.node.getNumOfSavedModels()); // 0
```

## TFSavedModel Interface

The loaded SavedModel implements the `InferenceModel` interface with additional SavedModel-specific properties.

```typescript { .api }
interface TFSavedModel extends InferenceModel {
  /** Input tensor specifications */
  inputs: ModelTensorInfo;

  /** Output tensor specifications */
  outputs: ModelTensorInfo;

  /** Run inference on input data */
  predict(
    inputs: Tensor | Tensor[] | NamedTensorMap,
    config?: PredictConfig
  ): Tensor | Tensor[] | NamedTensorMap;

  /** Get intermediate activations (not yet implemented) */
  execute(
    inputs: Tensor | Tensor[] | NamedTensorMap,
    outputs: string | string[]
  ): Tensor | Tensor[];

  /** Release model resources */
  dispose(): void;
}

interface ModelTensorInfo {
  [inputName: string]: {
    name: string;
    shape: number[];
    dtype: string;
  };
}

interface PredictConfig {
  batchSize?: number;
  verbose?: boolean;
}
```

## SavedModel Metadata Types

```typescript { .api }
interface MetaGraph {
  /** Tags associated with this MetaGraph */
  tags: string[];

  /** Available signature definitions */
  signatureDef: {[key: string]: SignatureDefEntry};
}

interface SignatureDefEntry {
  /** Input tensor specifications */
  inputs: {[key: string]: TensorInfo};

  /** Output tensor specifications */
  outputs: {[key: string]: TensorInfo};

  /** Method name */
  methodName: string;
}

interface TensorInfo {
  /** Tensor name in the graph */
  name: string;

  /** Tensor shape (-1 for dynamic dimensions) */
  shape: number[];

  /** Data type */
  dtype: string;
}
```

## Common Usage Patterns

### Image Classification Model

```typescript
import * as tf from '@tensorflow/tfjs-node';
import * as fs from 'fs';

async function classifyImage(modelPath: string, imagePath: string) {
  // Load the SavedModel
  const model = await tf.node.loadSavedModel(modelPath);

  // Check model input requirements
  console.log('Model expects inputs:', model.inputs);

  // Load and preprocess image
  const imageBuffer = fs.readFileSync(imagePath);
  const imageArray = new Uint8Array(imageBuffer);
  const imageTensor = tf.node.decodeImage(imageArray, 3);

  // Resize to model input size (assuming 224x224)
  const resized = tf.image.resizeBilinear(imageTensor, [224, 224]);

  // Normalize and add batch dimension
  const normalized = resized.div(255.0).expandDims(0);

  // Run inference
  const predictions = model.predict(normalized) as Tensor;

  // Get top prediction
  const topK = tf.topk(predictions, 5);
  const indices = await topK.indices.data();
  const values = await topK.values.data();

  console.log('Top 5 predictions:');
  for (let i = 0; i < 5; i++) {
    console.log(` Class ${indices[i]}: ${values[i].toFixed(4)}`);
  }

  // Clean up
  imageTensor.dispose();
  resized.dispose();
  normalized.dispose();
  predictions.dispose();
  topK.indices.dispose();
  topK.values.dispose();
  model.dispose();
}

// Usage
classifyImage('./image_classifier_model', './test_image.jpg');
```

### Text Processing Model

```typescript
async function processText(modelPath: string, texts: string[]) {
  const model = await tf.node.loadSavedModel(modelPath);

  // Assume the model expects tokenized input
  // (In practice, you'd use a proper tokenizer)
  const tokenized = texts.map(text =>
    text.split(' ').map(word => word.charCodeAt(0) % 1000)
  );

  // Pad sequences to same length
  const maxLen = Math.max(...tokenized.map(seq => seq.length));
  const padded = tokenized.map(seq => [
    ...seq,
    ...Array(maxLen - seq.length).fill(0)
  ]);

  // Convert to tensor
  const inputTensor = tf.tensor2d(padded);

  // Run inference
  const outputs = model.predict(inputTensor) as Tensor;

  console.log('Text processing results:');
  outputs.print();

  // Clean up
  inputTensor.dispose();
  outputs.dispose();
  model.dispose();
}
```

### Named Input/Output Model

```typescript
async function useNamedInputsOutputs(modelPath: string) {
  const model = await tf.node.loadSavedModel(modelPath);

  // Check input/output names
  console.log('Input names:', Object.keys(model.inputs));
  console.log('Output names:', Object.keys(model.outputs));

  // Create named inputs
  const namedInputs = {
    'input_1': tf.randomNormal([1, 10]),
    'input_2': tf.randomNormal([1, 20])
  };

  // Run prediction with named inputs
  const namedOutputs = model.predict(namedInputs) as NamedTensorMap;

  // Access outputs by name
  console.log('Output_1 shape:', namedOutputs['output_1'].shape);
  console.log('Output_2 shape:', namedOutputs['output_2'].shape);

  // Clean up
  Object.values(namedInputs).forEach(tensor => tensor.dispose());
  Object.values(namedOutputs).forEach(tensor => tensor.dispose());
  model.dispose();
}
```

### Batch Processing

```typescript
async function batchProcess(modelPath: string, batchSize: number = 32) {
  const model = await tf.node.loadSavedModel(modelPath);

  // Create batch of dummy data
  const batchInput = tf.randomNormal([batchSize, 224, 224, 3]);

  // Process entire batch at once
  const batchOutput = model.predict(batchInput, {
    batchSize: batchSize,
    verbose: true
  }) as Tensor;

  console.log('Batch input shape:', batchInput.shape);
  console.log('Batch output shape:', batchOutput.shape);

  // Process results
  const results = await batchOutput.data();
  console.log(`Processed ${batchSize} samples`);

  // Clean up
  batchInput.dispose();
  batchOutput.dispose();
  model.dispose();
}
```

## Error Handling

```typescript
async function robustModelLoading(modelPath: string) {
  try {
    // First, inspect the model to understand its structure
    const metaGraphs = await tf.node.getMetaGraphsFromSavedModel(modelPath);

    if (metaGraphs.length === 0) {
      throw new Error('No MetaGraphs found in SavedModel');
    }

    // Choose appropriate tags and signature
    const metaGraph = metaGraphs[0];
    const tags = metaGraph.tags;
    const signatures = Object.keys(metaGraph.signatureDef);

    if (signatures.length === 0) {
      throw new Error('No signatures found in MetaGraph');
    }

    // Load the model
    const model = await tf.node.loadSavedModel(modelPath, tags, signatures[0]);

    console.log('Model loaded successfully');
    console.log('Available signatures:', signatures);

    return model;

  } catch (error) {
    console.error('Error loading SavedModel:', error.message);

    // Check if path exists
    if (!fs.existsSync(modelPath)) {
      console.error('Model path does not exist:', modelPath);
    }

    // Check if it's a valid SavedModel directory
    const pbFile = path.join(modelPath, 'saved_model.pb');
    if (!fs.existsSync(pbFile)) {
      console.error('saved_model.pb not found. Not a valid SavedModel.');
    }

    throw error;
  }
}
```