TensorFlow backend for TensorFlow.js via Node.js - provides native TensorFlow execution in backend JavaScript applications under the Node.js runtime, accelerated by the TensorFlow C binary under the hood
—
TensorFlow.js Node provides comprehensive TensorBoard integration for logging training metrics, visualizing model performance, and monitoring training progress. This enables the same rich visualization capabilities available in Python TensorFlow directly in Node.js applications.
Create a TensorBoard summary writer for logging scalar and histogram data.
/**
* Create a TensorBoard summary writer
* @param logdir - Directory to write TensorBoard logs
* @param maxQueue - Maximum number of summaries to queue before writing (default: 10)
* @param flushMillis - How often to flush summaries to disk in milliseconds (default: 120000)
* @param filenameSuffix - Optional suffix for log filenames
* @returns SummaryFileWriter instance
*/
function summaryFileWriter(
logdir: string,
maxQueue?: number,
flushMillis?: number,
filenameSuffix?: string
): SummaryFileWriter;

Usage Example:
import * as tf from '@tensorflow/tfjs-node';
// Create a summary writer
const writer = tf.node.summaryFileWriter('./logs/training');
// Log scalar values
writer.scalar('loss', 0.5, 1);
writer.scalar('accuracy', 0.85, 1);
writer.scalar('learning_rate', 0.001, 1);
// Log with different step
writer.scalar('loss', 0.4, 2);
writer.scalar('accuracy', 0.87, 2);
// Force write to disk
writer.flush();

Create a callback for automatic logging during model training.
/**
* Create a TensorBoard callback for model training
* @param logdir - Directory to write TensorBoard logs (optional, defaults to './logs')
* @param args - Additional configuration options
* @returns TensorBoardCallback instance
*/
function tensorBoard(
logdir?: string,
args?: TensorBoardCallbackArgs
): TensorBoardCallback;
interface TensorBoardCallbackArgs {
/** How often to log ('batch' or 'epoch') */
updateFreq?: 'batch' | 'epoch';
/** How often to log histograms (0 = never) */
histogramFreq?: number;
}

Usage Example:
import * as tf from '@tensorflow/tfjs-node';
// Create model
const model = tf.sequential({
layers: [
tf.layers.dense({ inputShape: [4], units: 10, activation: 'relu' }),
tf.layers.dense({ units: 3, activation: 'softmax' })
]
});
model.compile({
optimizer: 'adam',
loss: 'sparseCategoricalCrossentropy',
metrics: ['accuracy']
});
// Create TensorBoard callback
const tensorboardCallback = tf.node.tensorBoard('./logs/experiment_1', {
updateFreq: 'epoch',
histogramFreq: 1 // Log histograms every epoch
});
// Generate training data
const xs = tf.randomNormal([1000, 4]);
const ys = tf.randomUniform([1000, 1], 0, 3, 'int32');
// Train with TensorBoard logging
await model.fit(xs, ys, {
epochs: 50,
batchSize: 32,
validationSplit: 0.2,
callbacks: [tensorboardCallback]
});
console.log('Training complete. View logs with: tensorboard --logdir ./logs');

The SummaryFileWriter class provides methods for logging different types of data to TensorBoard.
interface SummaryFileWriter {
/** Write a scalar value */
scalar(name: string, value: number, step: number, description?: string): void;
/** Write a histogram of tensor values */
histogram(name: string, data: Tensor, step: number, buckets?: number, description?: string): void;
/** Force write buffered summaries to disk */
flush(): void;
}

Log scalar metrics like loss, accuracy, and learning rate.
/**
* Write a scalar summary
* @param name - Name of the scalar (will appear in TensorBoard)
* @param value - Scalar value to log
* @param step - Training step or epoch number
* @param description - Optional description for the metric
*/
scalar(name: string, value: number, step: number, description?: string): void;

Usage Example:
const writer = tf.node.summaryFileWriter('./logs/metrics');
// Log training metrics
for (let epoch = 0; epoch < 100; epoch++) {
// Simulate training
const loss = Math.exp(-epoch * 0.1) + Math.random() * 0.1;
const accuracy = 1 - Math.exp(-epoch * 0.05) - Math.random() * 0.05;
const lr = 0.001 * Math.pow(0.95, epoch);
// Log to TensorBoard
writer.scalar('training/loss', loss, epoch, 'Cross-entropy loss during training');
writer.scalar('training/accuracy', accuracy, epoch, 'Classification accuracy');
writer.scalar('hyperparameters/learning_rate', lr, epoch, 'Current learning rate');
// Log validation metrics (simulated)
if (epoch % 5 === 0) {
const valLoss = loss + Math.random() * 0.05;
const valAccuracy = accuracy - Math.random() * 0.02;
writer.scalar('validation/loss', valLoss, epoch);
writer.scalar('validation/accuracy', valAccuracy, epoch);
}
}
writer.flush();
console.log('Metrics logged. Run: tensorboard --logdir ./logs');

Log histograms of tensor values to visualize weight distributions and activations.
/**
* Write a histogram summary
* @param name - Name of the histogram
* @param data - Tensor containing the data to histogram
* @param step - Training step or epoch number
* @param buckets - Number of histogram buckets (default: 30)
* @param description - Optional description
*/
histogram(name: string, data: Tensor, step: number, buckets?: number, description?: string): void;

Usage Example:
const writer = tf.node.summaryFileWriter('./logs/weights');
// Create a model to monitor
const model = tf.sequential({
layers: [
tf.layers.dense({ inputShape: [10], units: 5, activation: 'relu', name: 'hidden' }),
tf.layers.dense({ units: 1, activation: 'sigmoid', name: 'output' })
]
});
// Log initial weight distributions
model.layers.forEach((layer, layerIndex) => {
if ('getWeights' in layer) {
const weights = layer.getWeights();
weights.forEach((weight, weightIndex) => {
writer.histogram(
`layer_${layerIndex}/weights_${weightIndex}`,
weight,
0,
50,
`Weight distribution for layer ${layer.name}`
);
});
}
});
// Simulate training and log weight changes
for (let epoch = 1; epoch <= 10; epoch++) {
// ... training code here ...
// Log weight distributions periodically
if (epoch % 5 === 0) {
model.layers.forEach((layer, layerIndex) => {
if ('getWeights' in layer) {
const weights = layer.getWeights();
weights.forEach((weight, weightIndex) => {
writer.histogram(
`layer_${layerIndex}/weights_${weightIndex}`,
weight,
epoch,
50
);
});
}
});
}
}
writer.flush();

The TensorBoardCallback automatically logs metrics during training.
class TensorBoardCallback extends CustomCallback {
constructor(logdir?: string, updateFreq?: 'batch' | 'epoch', histogramFreq?: number);
}
interface TensorBoardCallbackArgs {
/** How often to log: 'batch' logs after each batch, 'epoch' logs after each epoch */
updateFreq?: 'batch' | 'epoch';
/** How often to log weight/activation histograms (in epochs, 0 = never) */
histogramFreq?: number;
}

/**
 * Runs one training experiment, logging hyperparameters, per-epoch metrics
 * (via the TensorBoard callback) and final results to its own log directory.
 * @param experimentName - Name of the run; logs are written to ./logs/<experimentName>
 * @param learningRate - Learning rate passed to the Adam optimizer
 * @param batchSize - Mini-batch size used by model.fit
 * @returns The final training loss and accuracy of the run
 */
async function runExperiment(
  experimentName: string,
  learningRate: number,
  batchSize: number
) {
  const logdir = `./logs/${experimentName}`;
  const summaryWriter = tf.node.summaryFileWriter(logdir);

  // Record the hyperparameters at step 0 so each run is self-describing.
  summaryWriter.scalar('hyperparameters/learning_rate', learningRate, 0);
  summaryWriter.scalar('hyperparameters/batch_size', batchSize, 0);

  // Small MLP classifier: 784 -> 128 (relu) -> dropout -> 10-way softmax.
  const model = tf.sequential({
    layers: [
      tf.layers.dense({ inputShape: [784], units: 128, activation: 'relu' }),
      tf.layers.dropout({ rate: 0.2 }),
      tf.layers.dense({ units: 10, activation: 'softmax' })
    ]
  });
  model.compile({
    optimizer: tf.train.adam(learningRate),
    loss: 'sparseCategoricalCrossentropy',
    metrics: ['accuracy']
  });

  // Automatic per-epoch logging; weight histograms every 5 epochs.
  const tensorboardCallback = tf.node.tensorBoard(logdir, {
    updateFreq: 'epoch',
    histogramFreq: 5
  });

  // Synthetic stand-in data for the demo.
  const inputs = tf.randomNormal([5000, 784]);
  const labels = tf.randomUniform([5000, 1], 0, 10, 'int32');

  const trainingHistory = await model.fit(inputs, labels, {
    epochs: 50,
    batchSize: batchSize,
    validationSplit: 0.2,
    callbacks: [tensorboardCallback]
  });

  // Capture the last recorded metric values under a 'final/' prefix.
  const finalLoss = trainingHistory.history.loss.slice(-1)[0];
  const finalAccuracy = trainingHistory.history.acc.slice(-1)[0];
  summaryWriter.scalar('final/loss', finalLoss, 50);
  summaryWriter.scalar('final/accuracy', finalAccuracy, 50);
  summaryWriter.flush();

  // Free tensor memory held by the inputs and the model weights.
  inputs.dispose();
  labels.dispose();
  model.dispose();

  return { finalLoss, finalAccuracy };
}
// Run multiple experiments
/**
 * Sweeps over learning-rate / batch-size combinations, running each
 * configuration with runExperiment so TensorBoard can compare the runs
 * side by side (one log subdirectory per experiment).
 */
async function hyperparameterSweep() {
  const experiments = [
    { name: 'exp_lr_001_bs_32', lr: 0.001, bs: 32 },
    { name: 'exp_lr_01_bs_32', lr: 0.01, bs: 32 },
    { name: 'exp_lr_001_bs_64', lr: 0.001, bs: 64 },
    { name: 'exp_lr_01_bs_64', lr: 0.01, bs: 64 }
  ];
  // Run sequentially: each experiment gets the backend to itself while training.
  for (const exp of experiments) {
    console.log(`Running experiment: ${exp.name}`);
    const results = await runExperiment(exp.name, exp.lr, exp.bs);
    console.log(`Results: loss=${results.finalLoss}, acc=${results.finalAccuracy}`);
  }
  console.log('All experiments complete. Run: tensorboard --logdir ./logs');
}

/**
 * Convenience wrapper around a SummaryFileWriter that logs training,
 * validation and per-layer weight statistics under consistent tag prefixes.
 */
class CustomMetricsLogger {
  private writer: tf.SummaryFileWriter;

  constructor(logdir: string) {
    this.writer = tf.node.summaryFileWriter(logdir);
  }

  /** Log the core per-step training metrics under the 'training/' prefix. */
  logTrainingStep(
    step: number,
    loss: number,
    accuracy: number,
    gradientNorm: number,
    learningRate: number
  ) {
    this.writer.scalar('training/loss', loss, step);
    this.writer.scalar('training/accuracy', accuracy, step);
    this.writer.scalar('training/gradient_norm', gradientNorm, step);
    this.writer.scalar('training/learning_rate', learningRate, step);
  }

  /** Log validation metrics under the 'validation/' prefix. */
  logValidationStep(step: number, valLoss: number, valAccuracy: number) {
    this.writer.scalar('validation/loss', valLoss, step);
    this.writer.scalar('validation/accuracy', valAccuracy, step);
  }

  /**
   * Log summary statistics (mean/std/min/max) and a full histogram for every
   * weight tensor of every layer in the model.
   *
   * Fix: the previous version leaked the tensors returned by tf.moments() —
   * neither moments.mean nor moments.variance was ever disposed, leaking two
   * tensors per weight per call. All intermediate tensors are now created
   * inside tf.tidy, which disposes them automatically; only plain numbers
   * escape the tidy scope.
   */
  logModelWeights(model: tf.LayersModel, step: number) {
    model.layers.forEach((layer, layerIdx) => {
      if ('getWeights' in layer && layer.getWeights().length > 0) {
        const weights = layer.getWeights();
        weights.forEach((weight, weightIdx) => {
          // Compute scalar statistics inside tidy so every intermediate
          // tensor (including the moments' mean and variance) is disposed.
          const [mean, std, min, max] = tf.tidy(() => {
            const moments = tf.moments(weight);
            return [
              moments.mean.dataSync()[0],
              moments.variance.sqrt().dataSync()[0],
              tf.min(weight).dataSync()[0],
              tf.max(weight).dataSync()[0]
            ];
          });
          this.writer.scalar(`weights/layer_${layerIdx}_${weightIdx}/mean`, mean, step);
          this.writer.scalar(`weights/layer_${layerIdx}_${weightIdx}/std`, std, step);
          this.writer.scalar(`weights/layer_${layerIdx}_${weightIdx}/min`, min, step);
          this.writer.scalar(`weights/layer_${layerIdx}_${weightIdx}/max`, max, step);
          // Full distribution for TensorBoard's histogram dashboard.
          this.writer.histogram(`weights/layer_${layerIdx}_${weightIdx}/distribution`, weight, step);
        });
      }
    });
  }

  /** Force buffered summaries to disk. */
  flush() {
    this.writer.flush();
  }
}
// Usage
const logger = new CustomMetricsLogger('./logs/detailed');
// During training loop
for (let step = 0; step < 1000; step++) {
// ... training step ...
logger.logTrainingStep(step, loss, accuracy, gradNorm, lr);
if (step % 100 === 0) {
logger.logValidationStep(step, valLoss, valAccuracy);
logger.logModelWeights(model, step);
}
}
logger.flush();

/**
 * Logs process-level and TensorFlow.js memory/timing metrics so resource
 * usage can be watched in TensorBoard while a model trains.
 */
class RealTimeTrainingMonitor {
  private writer: tf.SummaryFileWriter;
  private startTime: number;

  constructor(logdir: string) {
    // maxQueue = 1, flushMillis = 1000: summaries are written out almost
    // immediately so the dashboard updates in near real time.
    this.writer = tf.node.summaryFileWriter(logdir, 1, 1000); // Flush every second
    this.startTime = Date.now();
  }

  /** Log Node.js process memory, wall-clock time and TF.js allocation counters. */
  logSystemMetrics(step: number) {
    const mem = process.memoryUsage();
    const elapsedSec = (Date.now() - this.startTime) / 1000; // seconds

    // Node.js process memory, reported in megabytes.
    const toMb = (bytes: number) => bytes / 1024 / 1024;
    this.writer.scalar('system/memory_rss_mb', toMb(mem.rss), step);
    this.writer.scalar('system/memory_heap_used_mb', toMb(mem.heapUsed), step);
    this.writer.scalar('system/memory_heap_total_mb', toMb(mem.heapTotal), step);

    // Wall-clock time since the monitor was constructed.
    this.writer.scalar('system/time_elapsed_sec', elapsedSec, step);

    // TensorFlow.js allocation counters.
    const tfMemory = tf.memory();
    this.writer.scalar('tensorflow/num_tensors', tfMemory.numTensors, step);
    this.writer.scalar('tensorflow/num_data_buffers', tfMemory.numDataBuffers, step);
    this.writer.scalar('tensorflow/num_bytes', tfMemory.numBytes, step);
  }

  /** Force buffered summaries to disk. */
  flush() {
    this.writer.flush();
  }
}
// Usage during training
const monitor = new RealTimeTrainingMonitor('./logs/monitoring');
// Log system metrics every 10 steps
for (let step = 0; step < 1000; step++) {
// ... training ...
if (step % 10 === 0) {
monitor.logSystemMetrics(step);
monitor.flush();
}
}

After logging data, start TensorBoard to visualize the results:
tensorboard --logdir ./logs
# For multiple experiments
tensorboard --logdir ./logs --port 6007
# With custom sampling
tensorboard --logdir ./logs --reload_interval 5

Then open your browser to http://localhost:6006 (or the specified port) to view the visualizations.
Install with Tessl CLI
npx tessl i tessl/npm-tensorflow--tfjs-node