CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/npm-tensorflow--tfjs-node

TensorFlow backend for TensorFlow.js via Node.js — provides native TensorFlow execution for backend JavaScript applications running under the Node.js runtime, accelerated by the TensorFlow C binary under the hood

Pending
Overview
Eval results
Files

savedmodel.mddocs/

SavedModel Support

TensorFlow.js Node provides comprehensive support for loading and running TensorFlow SavedModels, enabling seamless integration with models trained in Python TensorFlow. This allows you to deploy Python-trained models in Node.js environments for high-performance inference.

Capabilities

Loading SavedModels

Load SavedModel

Load a TensorFlow SavedModel from the file system for inference.

/**
 * Load a TensorFlow SavedModel for inference.
 *
 * The returned model holds native resources and must be released with
 * `dispose()` when no longer needed — it is not garbage-collected
 * automatically (see the usage examples below).
 *
 * @param path - Path to the SavedModel directory (the directory that
 *   contains `saved_model.pb`)
 * @param tags - Model tags to load (default: ['serve'])
 * @param signature - Signature to use for inference (default: 'serving_default')
 * @returns Promise resolving to TFSavedModel instance; presumably rejects
 *   when the path/tags/signature do not match a stored MetaGraph —
 *   TODO confirm rejection behavior against the implementation
 */
function loadSavedModel(
  path: string,
  tags?: string[],
  signature?: string
): Promise<TFSavedModel>;

Usage Example:

import * as tf from '@tensorflow/tfjs-node';

// Load a SavedModel. Every tensor and the model itself must be
// dispose()d manually — tfjs-node memory is not garbage-collected.
const model = await tf.node.loadSavedModel('./path/to/saved_model');

console.log('Model inputs:', model.inputs);
console.log('Model outputs:', model.outputs);

// Run inference
const inputTensor = tf.tensor2d([[1.0, 2.0, 3.0, 4.0]]);
// Use tf.Tensor from the namespace import — a bare `Tensor` identifier
// is not in scope in this snippet.
const prediction = model.predict(inputTensor) as tf.Tensor;

console.log('Prediction:');
prediction.print();

// Clean up
inputTensor.dispose();
prediction.dispose();
model.dispose();

Load with Specific Tags and Signature

// Load model with specific serving tags
const tags = ['serve', 'gpu'];   // prefer the GPU-optimized MetaGraph when present
const signatureKey = 'predict';  // use the 'predict' signature instead of the default
const model = await tf.node.loadSavedModel('./my_model', tags, signatureKey);

SavedModel Inspection

Get MetaGraphs Information

Inspect available MetaGraphs, tags, and signatures in a SavedModel without loading it.

/**
 * Get metadata about a SavedModel's available configurations.
 *
 * Useful for discovering which tags and signatures exist before calling
 * `loadSavedModel`, without loading the model graph itself.
 *
 * @param path - Path to the SavedModel directory
 * @returns Promise resolving to array of MetaGraph information (one entry
 *   per MetaGraph stored in the SavedModel)
 */
function getMetaGraphsFromSavedModel(path: string): Promise<MetaGraph[]>;

Usage Example:

// Inspect SavedModel before loading
const metaGraphs = await tf.node.getMetaGraphsFromSavedModel('./my_model');

console.log('Available MetaGraphs:');
for (const [index, metaGraph] of metaGraphs.entries()) {
  console.log(`MetaGraph ${index}:`);
  console.log('  Tags:', metaGraph.tags);
  console.log('  Signatures:', Object.keys(metaGraph.signatureDef));

  // Show signature details
  for (const [sigName, sigDef] of Object.entries(metaGraph.signatureDef)) {
    console.log(`  Signature "${sigName}":`);
    console.log('    Inputs:', Object.keys(sigDef.inputs));
    console.log('    Outputs:', Object.keys(sigDef.outputs));
  }
}

// Load model with discovered tags and signature
if (metaGraphs.length > 0) {
  const [firstMetaGraph] = metaGraphs;
  const model = await tf.node.loadSavedModel(
    './my_model',
    firstMetaGraph.tags,
    Object.keys(firstMetaGraph.signatureDef)[0]
  );
}

Model Management

Get Number of Loaded Models

Track the number of SavedModels currently loaded in memory.

/**
 * Get the number of currently loaded SavedModels.
 *
 * Each successful `loadSavedModel()` increments this count and each
 * `TFSavedModel.dispose()` decrements it (see the usage example below).
 *
 * @returns Number of loaded SavedModel instances
 */
function getNumOfSavedModels(): number;

Usage Example:

// Each load increments the live-model count; each dispose() decrements it.
const reportModelCount = (label: string) =>
  console.log(label, tf.node.getNumOfSavedModels());

reportModelCount('Initially loaded models:'); // 0

const model1 = await tf.node.loadSavedModel('./model1');
reportModelCount('After loading model1:'); // 1

const model2 = await tf.node.loadSavedModel('./model2');
reportModelCount('After loading model2:'); // 2

model1.dispose();
reportModelCount('After disposing model1:'); // 1

model2.dispose();
reportModelCount('After disposing model2:'); // 0

TFSavedModel Interface

The loaded SavedModel implements the InferenceModel interface with additional SavedModel-specific properties.

interface TFSavedModel extends InferenceModel {
  /** Input tensor specifications, keyed by input name (see ModelTensorInfo) */
  inputs: ModelTensorInfo;
  
  /** Output tensor specifications, keyed by output name */
  outputs: ModelTensorInfo;
  
  /**
   * Run inference on input data. Accepts a single tensor, an ordered tensor
   * array, or a name->tensor map; the return shape mirrors the signature's
   * outputs. The caller owns the returned tensor(s) and must dispose() them
   * (see the usage examples).
   */
  predict(
    inputs: Tensor | Tensor[] | NamedTensorMap,
    config?: PredictConfig
  ): Tensor | Tensor[] | NamedTensorMap;
  
  /** Get intermediate activations (not yet implemented — presumably throws; confirm) */
  execute(
    inputs: Tensor | Tensor[] | NamedTensorMap,
    outputs: string | string[]
  ): Tensor | Tensor[];
  
  /** Release native model resources; decrements getNumOfSavedModels() */
  dispose(): void;
}

// Maps each input/output name of the loaded signature to its tensor metadata.
interface ModelTensorInfo {
  [inputName: string]: {
    name: string;    // tensor name in the underlying graph
    shape: number[]; // per-dimension sizes (-1 for dynamic — see TensorInfo)
    dtype: string;   // data type string, e.g. 'float32' — TODO confirm exact values
  };
}

// Optional settings for TFSavedModel.predict().
interface PredictConfig {
  /** Batch size used for prediction — TODO confirm how the backend applies it */
  batchSize?: number;
  /** When true, log additional detail during prediction */
  verbose?: boolean;
}

SavedModel Metadata Types

// One MetaGraph entry as returned by getMetaGraphsFromSavedModel().
interface MetaGraph {
  /** Tags associated with this MetaGraph (pass these to loadSavedModel) */
  tags: string[];
  
  /** Available signature definitions, keyed by signature name */
  signatureDef: {[key: string]: SignatureDefEntry};
}

// One signature inside a MetaGraph's signatureDef map.
interface SignatureDefEntry {
  /** Input tensor specifications, keyed by the signature's input name */
  inputs: {[key: string]: TensorInfo};
  
  /** Output tensor specifications, keyed by the signature's output name */
  outputs: {[key: string]: TensorInfo};
  
  /** Method name — presumably the TensorFlow serving method id; confirm format */
  methodName: string;
}

// Metadata for a single tensor referenced by a signature.
interface TensorInfo {
  /** Tensor name in the underlying graph */
  name: string;
  
  /** Tensor shape (-1 for dynamic dimensions) */
  shape: number[];
  
  /** Data type, e.g. 'float32' — TODO confirm the exact set of dtype strings */
  dtype: string;
}

Common Usage Patterns

Image Classification Model

import * as tf from '@tensorflow/tfjs-node';
import * as fs from 'fs';

/**
 * Classify an image file with a SavedModel image classifier and print the
 * top-5 class indices and scores.
 *
 * @param modelPath - Path to the SavedModel directory
 * @param imagePath - Path to the image file (any format decodeImage accepts)
 */
async function classifyImage(modelPath: string, imagePath: string) {
  // Load the SavedModel
  const model = await tf.node.loadSavedModel(modelPath);
  
  // Check model input requirements
  console.log('Model expects inputs:', model.inputs);
  
  // Load and decode the image into an HxWx3 uint8 tensor
  const imageBuffer = fs.readFileSync(imagePath);
  const imageArray = new Uint8Array(imageBuffer);
  const imageTensor = tf.node.decodeImage(imageArray, 3);
  
  // Resize to model input size (assuming 224x224 — check model.inputs)
  const resized = tf.image.resizeBilinear(imageTensor, [224, 224]);
  
  // Normalize to [0,1] and add a batch dimension. Keep the div() result in
  // its own variable so it can be disposed — the original chained
  // div().expandDims() leaked the intermediate tensor.
  const scaled = resized.div(255.0);
  const normalized = scaled.expandDims(0);
  
  // Run inference (tf.Tensor comes from the namespace import; a bare
  // `Tensor` identifier is not in scope here)
  const predictions = model.predict(normalized) as tf.Tensor;
  
  // Get top prediction
  const topK = tf.topk(predictions, 5);
  const indices = await topK.indices.data();
  const values = await topK.values.data();
  
  console.log('Top 5 predictions:');
  for (let i = 0; i < 5; i++) {
    console.log(`  Class ${indices[i]}: ${values[i].toFixed(4)}`);
  }
  
  // Clean up every tensor created above
  imageTensor.dispose();
  resized.dispose();
  scaled.dispose();
  normalized.dispose();
  predictions.dispose();
  topK.indices.dispose();
  topK.values.dispose();
  model.dispose();
}

// Usage — handle the rejection so a failure doesn't become an
// unhandled promise rejection
classifyImage('./image_classifier_model', './test_image.jpg')
  .catch(err => console.error(err));

Text Processing Model

async function processText(modelPath: string, texts: string[]) {
  const model = await tf.node.loadSavedModel(modelPath);
  
  // Assume the model expects tokenized input
  // (In practice, you'd use a proper tokenizer)
  const tokenized = texts.map(text => 
    text.split(' ').map(word => word.charCodeAt(0) % 1000)
  );
  
  // Pad sequences to same length
  const maxLen = Math.max(...tokenized.map(seq => seq.length));
  const padded = tokenized.map(seq => [
    ...seq,
    ...Array(maxLen - seq.length).fill(0)
  ]);
  
  // Convert to tensor
  const inputTensor = tf.tensor2d(padded);
  
  // Run inference
  const outputs = model.predict(inputTensor) as Tensor;
  
  console.log('Text processing results:');
  outputs.print();
  
  // Clean up
  inputTensor.dispose();
  outputs.dispose();
  model.dispose();
}

Named Input/Output Model

async function useNamedInputsOutputs(modelPath: string) {
  const model = await tf.node.loadSavedModel(modelPath);
  
  // Check input/output names
  console.log('Input names:', Object.keys(model.inputs));
  console.log('Output names:', Object.keys(model.outputs));
  
  // Create named inputs
  const namedInputs = {
    'input_1': tf.randomNormal([1, 10]),
    'input_2': tf.randomNormal([1, 20])
  };
  
  // Run prediction with named inputs
  const namedOutputs = model.predict(namedInputs) as NamedTensorMap;
  
  // Access outputs by name
  console.log('Output_1 shape:', namedOutputs['output_1'].shape);
  console.log('Output_2 shape:', namedOutputs['output_2'].shape);
  
  // Clean up
  Object.values(namedInputs).forEach(tensor => tensor.dispose());
  Object.values(namedOutputs).forEach(tensor => tensor.dispose());
  model.dispose();
}

Batch Processing

async function batchProcess(modelPath: string, batchSize: number = 32) {
  const model = await tf.node.loadSavedModel(modelPath);
  
  // Create batch of dummy data
  const batchInput = tf.randomNormal([batchSize, 224, 224, 3]);
  
  // Process entire batch at once
  const batchOutput = model.predict(batchInput, {
    batchSize: batchSize,
    verbose: true
  }) as Tensor;
  
  console.log('Batch input shape:', batchInput.shape);
  console.log('Batch output shape:', batchOutput.shape);
  
  // Process results
  const results = await batchOutput.data();
  console.log(`Processed ${batchSize} samples`);
  
  // Clean up
  batchInput.dispose();
  batchOutput.dispose();
  model.dispose();
}

Error Handling

/**
 * Load a SavedModel defensively: inspect its MetaGraphs first, pick the
 * first available tags/signature, and print diagnostics on failure.
 *
 * @param modelPath - Path to the SavedModel directory
 * @returns The loaded TFSavedModel (caller must dispose() it)
 * @throws Rethrows the original inspection/load error after logging diagnostics
 */
async function robustModelLoading(modelPath: string) {
  try {
    // First, inspect the model to understand its structure
    const metaGraphs = await tf.node.getMetaGraphsFromSavedModel(modelPath);
    
    if (metaGraphs.length === 0) {
      throw new Error('No MetaGraphs found in SavedModel');
    }
    
    // Choose appropriate tags and signature
    const metaGraph = metaGraphs[0];
    const tags = metaGraph.tags;
    const signatures = Object.keys(metaGraph.signatureDef);
    
    if (signatures.length === 0) {
      throw new Error('No signatures found in MetaGraph');
    }
    
    // Load the model
    const model = await tf.node.loadSavedModel(modelPath, tags, signatures[0]);
    
    console.log('Model loaded successfully');
    console.log('Available signatures:', signatures);
    
    return model;
    
  } catch (error) {
    // Under strict TS the catch variable is `unknown` — narrow before
    // reading .message instead of assuming an Error instance.
    const message = error instanceof Error ? error.message : String(error);
    console.error('Error loading SavedModel:', message);
    
    // Check if path exists (requires `import * as fs from 'fs'`)
    if (!fs.existsSync(modelPath)) {
      console.error('Model path does not exist:', modelPath);
    }
    
    // Check if it's a valid SavedModel directory
    // (requires `import * as path from 'path'`)
    const pbFile = path.join(modelPath, 'saved_model.pb');
    if (!fs.existsSync(pbFile)) {
      console.error('saved_model.pb not found. Not a valid SavedModel.');
    }
    
    throw error;
  }
}

Install with Tessl CLI

npx tessl i tessl/npm-tensorflow--tfjs-node

docs

callbacks.md

image-processing.md

index.md

io.md

savedmodel.md

tensorboard.md

tile.json