CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/npm-tensorflow--tfjs-backend-cpu

JavaScript CPU backend implementation for TensorFlow.js enabling machine learning operations in vanilla JavaScript

Pending
Overview
Eval results
Files

backend-cpu.mddocs/

MathBackendCPU Class

The MathBackendCPU class is the core CPU backend implementation for TensorFlow.js, extending the base KernelBackend class to provide high-performance tensor operations using vanilla JavaScript.

Class Definition

import { KernelBackend, DataType, TensorInfo, DataId, DataStorage } from '@tensorflow/tfjs-core';

export class MathBackendCPU extends KernelBackend {
  // Public properties
  public blockSize: number = 48;
  data: DataStorage<TensorData<DataType>>;
  
  // Methods documented below...
}

Type Definitions

TensorData Interface

interface TensorData<D extends DataType> {
  values?: BackendValues;
  dtype: D;
  complexTensorInfos?: { real: TensorInfo, imag: TensorInfo };
  refCount: number;
}

BackendValues Type

import type { BackendValues } from '@tensorflow/tfjs-core';

// BackendValues can be:
// - TypedArray (Float32Array, Int32Array, Uint8Array, etc.)
// - number[] 
// - string[]
// - Uint8Array[] (for string tensors)

Memory and Timing Types

interface MemoryInfo {
  numTensors: number;
  numDataBuffers: number;  
  numBytes: number;
  unreliable: boolean;
}

interface BackendTimingInfo {
  kernelMs: number;
}

Public Properties

blockSize

public blockSize: number = 48;

The block size used for various operations and memory allocation strategies. This value is optimized for CPU performance and affects how operations are chunked for processing.

data

data: DataStorage<TensorData<DataType>>;

Internal data storage that manages all tensor data using a reference counting system. Each tensor gets a unique DataId that maps to its TensorData.

Core Methods

write()

write(values: BackendValues, shape: number[], dtype: DataType): DataId

Writes tensor data to the backend storage system.

Parameters:

  • values: BackendValues - The tensor values (TypedArray, number[], or string[])
  • shape: number[] - The dimensions of the tensor
  • dtype: DataType - The data type ('float32', 'int32', 'bool', 'string', etc.)

Returns: DataId - Unique identifier for the stored tensor data

Example:

import { MathBackendCPU } from '@tensorflow/tfjs-backend-cpu/base';

const backend = new MathBackendCPU();

// Write float32 data
const dataId1 = backend.write(
  new Float32Array([1.0, 2.5, 3.7, 4.2]), 
  [2, 2], 
  'float32'
);

// Write integer data  
const dataId2 = backend.write(
  new Int32Array([10, 20, 30]), 
  [3], 
  'int32'
);

// Write string data
const dataId3 = backend.write(
  ['hello', 'world'], 
  [2], 
  'string'
);

makeTensorInfo()

makeTensorInfo(
  shape: number[], 
  dtype: DataType, 
  values?: BackendValues | string[]
): TensorInfo

Creates a TensorInfo object with data stored in the CPU backend.

Parameters:

  • shape: number[] - The tensor dimensions
  • dtype: DataType - The data type
  • values?: BackendValues | string[] - Optional initial values (if not provided, creates uninitialized tensor)

Returns: TensorInfo - Complete tensor information object

Example:

const backend = new MathBackendCPU();

// Create tensor with initial values
const tensorInfo1 = backend.makeTensorInfo(
  [3, 3], 
  'float32',
  new Float32Array([1, 2, 3, 4, 5, 6, 7, 8, 9])
);

// Create uninitialized tensor
const tensorInfo2 = backend.makeTensorInfo([5], 'int32');

// Create boolean tensor
const tensorInfo3 = backend.makeTensorInfo(
  [2, 2], 
  'bool',
  new Uint8Array([1, 0, 1, 0])
);

read()

read(dataId: DataId): Promise<BackendValues>

Asynchronously reads tensor data from storage.

Parameters:

  • dataId: DataId - The unique identifier for the tensor data

Returns: Promise<BackendValues> - Promise resolving to the tensor values

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Float32Array([1, 2, 3]), [3], 'float32');

// Async read
const values = await backend.read(dataId);
console.log(values); // Float32Array([1, 2, 3])

readSync()

readSync(dataId: DataId): BackendValues

Synchronously reads tensor data from storage.

Parameters:

  • dataId: DataId - The unique identifier for the tensor data

Returns: BackendValues - The tensor values

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Int32Array([10, 20, 30]), [3], 'int32');

// Sync read
const values = backend.readSync(dataId);
console.log(values); // Int32Array([10, 20, 30])

bufferSync()

bufferSync<R extends Rank, D extends DataType>(t: TensorInfo): TensorBuffer<R, D>

Creates a TensorBuffer that provides indexed access to tensor data.

Parameters:

  • t: TensorInfo - The tensor information object

Returns: TensorBuffer<R, D> - Buffer providing get/set access by indices

Example:

const backend = new MathBackendCPU();
const tensorInfo = backend.makeTensorInfo([2, 3], 'float32');
const buffer = backend.bufferSync(tensorInfo);

// Set values using multi-dimensional indexing
buffer.set(1.5, 0, 0);  // Set value at [0, 0]
buffer.set(2.7, 0, 1);  // Set value at [0, 1] 
buffer.set(3.9, 1, 2);  // Set value at [1, 2]

// Get values
const val = buffer.get(0, 0); // 1.5

// Access underlying values array
const allValues = buffer.values; // Float32Array

makeOutput()

makeOutput<T extends Tensor>(
  values: BackendValues, 
  shape: number[], 
  dtype: DataType
): T

Creates an output tensor from computed values.

Parameters:

  • values: BackendValues - The computed output values
  • shape: number[] - The output tensor shape
  • dtype: DataType - The output data type

Returns: T extends Tensor - The output tensor

Example:

const backend = new MathBackendCPU();

// Create output from computation
const result = backend.makeOutput(
  new Float32Array([2, 4, 6, 8]), // computed values
  [2, 2],                         // output shape
  'float32'                       // dtype
);

// Use in kernel implementations
function doubleKernel(inputs: { x: TensorInfo }, backend: MathBackendCPU): TensorInfo {
  const { x } = inputs;
  const values = backend.readSync(x.dataId) as Float32Array;
  const doubled = values.map(v => v * 2);
  
  return backend.makeOutput(doubled, x.shape, x.dtype);
}

Memory Management Methods

refCount()

refCount(dataId: DataId): number

Returns the current reference count for tensor data.

Parameters:

  • dataId: DataId - The data identifier

Returns: number - Current reference count

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Float32Array([1, 2, 3]), [3], 'float32');

console.log(backend.refCount(dataId)); // 1

incRef()

incRef(dataId: DataId): void

Increments the reference count for tensor data. Use this when storing additional references to prevent premature cleanup.

Parameters:

  • dataId: DataId - The data identifier

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Float32Array([1, 2, 3]), [3], 'float32');

backend.incRef(dataId);
console.log(backend.refCount(dataId)); // 2

// Now dataId won't be cleaned up until refCount reaches 0

decRef()

decRef(dataId: DataId): void

Decrements the reference count for tensor data. When count reaches 0, data becomes eligible for cleanup.

Parameters:

  • dataId: DataId - The data identifier

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Float32Array([1, 2, 3]), [3], 'float32');

backend.incRef(dataId); // refCount = 2
backend.decRef(dataId); // refCount = 1  
backend.decRef(dataId); // refCount = 0 (eligible for cleanup)

disposeData()

disposeData(dataId: DataId, force?: boolean): boolean

Disposes tensor data from memory if reference count allows.

Parameters:

  • dataId: DataId - The data identifier
  • force?: boolean = false - If true, dispose regardless of reference count

Returns: boolean - True if memory was actually released

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Float32Array([1, 2, 3]), [3], 'float32');

// Won't dispose if refCount > 0
let disposed = backend.disposeData(dataId);
console.log(disposed); // false (refCount is 1)

// Force disposal
disposed = backend.disposeData(dataId, true);
console.log(disposed); // true (memory released)

// Or decrement refCount first
const dataId2 = backend.write(new Float32Array([4, 5, 6]), [3], 'float32');
backend.decRef(dataId2); // refCount becomes 0
disposed = backend.disposeData(dataId2);
console.log(disposed); // true

disposeIntermediateTensorInfo()

disposeIntermediateTensorInfo(tensorInfo: TensorInfo): void

Disposes intermediate tensor data created during computations.

Parameters:

  • tensorInfo: TensorInfo - The intermediate tensor to dispose

Example:

function complexOperation(backend: MathBackendCPU, input: TensorInfo): TensorInfo {
  // Create intermediate result
  const intermediate = backend.makeTensorInfo([10], 'float32', new Float32Array(10));
  
  // Use intermediate in computation...
  const finalResult = backend.makeOutput(
    backend.readSync(intermediate.dataId),
    [10], 
    'float32'
  );
  
  // Clean up intermediate
  backend.disposeIntermediateTensorInfo(intermediate);
  
  return finalResult;
}

Utility Methods

move()

move(
  dataId: DataId, 
  values: BackendValues, 
  shape: number[], 
  dtype: DataType, 
  refCount: number
): void

Moves tensor data to a new location with updated metadata.

Parameters:

  • dataId: DataId - The data identifier
  • values: BackendValues - New tensor values
  • shape: number[] - New tensor shape
  • dtype: DataType - New data type
  • refCount: number - New reference count

Example:

const backend = new MathBackendCPU();
const dataId = backend.write(new Float32Array([1, 2]), [2], 'float32');

// Move data to new configuration
backend.move(
  dataId,
  new Float32Array([1, 2, 3, 4]), // new values
  [2, 2],                         // new shape
  'float32',                      // same dtype
  1                               // reset refCount
);

numDataIds()

numDataIds(): number

Returns the total number of data items currently stored.

Returns: number - Count of stored tensor data items

Example:

const backend = new MathBackendCPU();

console.log(backend.numDataIds()); // 0

const dataId1 = backend.write(new Float32Array([1, 2]), [2], 'float32');
const dataId2 = backend.write(new Int32Array([3, 4]), [2], 'int32');

console.log(backend.numDataIds()); // 2

backend.disposeData(dataId1, true);
console.log(backend.numDataIds()); // 1

time()

time(f: () => void): Promise<BackendTimingInfo>

Times the execution of a function and returns performance information.

Parameters:

  • f: () => void - Function to time

Returns: Promise<BackendTimingInfo> - Timing information with kernelMs property

Example:

const backend = new MathBackendCPU();

const timingInfo = await backend.time(() => {
  // Expensive operation
  const dataId = backend.write(new Float32Array(10000).fill(1), [10000], 'float32');
  const values = backend.readSync(dataId);
  const sum = Array.from(values).reduce((a, b) => a + b, 0);
});

console.log(`Operation took ${timingInfo.kernelMs}ms`);

memory()

memory(): MemoryInfo

Returns memory usage information (note: marked as unreliable due to JavaScript garbage collection).

Returns: MemoryInfo - Memory information object

Example:

const backend = new MathBackendCPU();

const memInfo = backend.memory();
console.log({
  numTensors: memInfo.numTensors,
  numDataBuffers: memInfo.numDataBuffers,
  numBytes: memInfo.numBytes,
  unreliable: memInfo.unreliable // Always true for CPU backend
});

where()

where(condition: Tensor): Tensor2D

Returns the indices where the condition tensor is true.

Parameters:

  • condition: Tensor - Boolean condition tensor

Returns: Tensor2D - 2D tensor of indices where condition is true

Example:

import * as tf from '@tensorflow/tfjs-core';

const backend = new MathBackendCPU();

// Create condition tensor
const condition = tf.tensor1d([true, false, true, false], 'bool');
const indices = backend.where(condition);

console.log(await indices.data()); // Indices where condition is true

floatPrecision()

floatPrecision(): 16 | 32

Returns the floating-point precision used by the backend.

Returns: 32 - Always returns 32 for CPU backend (32-bit floats)

Example:

const backend = new MathBackendCPU();
console.log(backend.floatPrecision()); // 32

epsilon()

epsilon(): number

Returns the machine epsilon (the difference between 1 and the smallest representable floating-point number greater than 1 — not the smallest representable positive number).

Returns: number - Machine epsilon value

Example:

const backend = new MathBackendCPU();
console.log(backend.epsilon()); // ~1.1920928955078125e-7

dispose()

dispose(): void

Cleans up all backend resources and stored tensor data.

Example:

const backend = new MathBackendCPU();

// Use backend for operations...
const dataId = backend.write(new Float32Array([1, 2, 3]), [3], 'float32');

// Clean up when done
backend.dispose();

console.log(backend.numDataIds()); // 0 (all data cleaned up)

Advanced Usage Examples

Custom Kernel Implementation

import { KernelConfig, KernelFunc } from '@tensorflow/tfjs-core';
import { MathBackendCPU } from '@tensorflow/tfjs-backend-cpu/base';

// Custom kernel that squares all values
// Custom kernel that squares all values
const squareKernel: KernelFunc = ({ inputs, backend }) => {
  const { x } = inputs;
  const cpuBackend = backend as MathBackendCPU;

  // Read the flat input values, then square each element.
  // Float32Array.map returns a new Float32Array of the same length.
  const source = cpuBackend.readSync(x.dataId) as Float32Array;
  const squared = source.map((value) => value * value);

  return cpuBackend.makeOutput(squared, x.shape, x.dtype);
};

// Register the kernel
const squareConfig: KernelConfig = {
  kernelName: 'Square',
  backendName: 'cpu', 
  kernelFunc: squareKernel
};

Memory Pool Management

/**
 * Wraps a MathBackendCPU and tracks every DataId it creates so that all
 * of them can be released in a single call, regardless of how many
 * references were handed out via cloneData().
 */
class ManagedBackend {
  private readonly backend: MathBackendCPU;
  private readonly activeDataIds: Set<DataId>;

  constructor() {
    this.backend = new MathBackendCPU();
    this.activeDataIds = new Set();
  }

  /** Writes values into the backend and remembers the resulting DataId. */
  createTensor(values: BackendValues, shape: number[], dtype: DataType): DataId {
    const dataId = this.backend.write(values, shape, dtype);
    this.activeDataIds.add(dataId);
    return dataId;
  }

  /** Shares existing data by bumping its reference count. */
  cloneData(dataId: DataId): DataId {
    this.backend.incRef(dataId);
    return dataId;
  }

  /** Force-disposes every tracked tensor and clears the registry. */
  cleanup(): void {
    for (const dataId of this.activeDataIds) {
      // force=true releases the memory regardless of the current
      // reference count, so manually decRef-ing each id down to zero
      // first (as the previous version did) is unnecessary.
      this.backend.disposeData(dataId, true);
    }
    this.activeDataIds.clear();
  }
}

Performance Monitoring

/**
 * Records per-operation kernel timings from a MathBackendCPU and
 * aggregates simple usage statistics (call counts and total kernel time).
 */
class PerformanceMonitor {
  private readonly backend: MathBackendCPU;
  private readonly operationCounts: Map<string, number> = new Map();
  private totalTime = 0;

  constructor(backend: MathBackendCPU) {
    this.backend = backend;
  }

  /**
   * Runs `operation` exactly once inside backend.time() and returns its
   * result.
   *
   * Bug fix: the previous version called `operation()` a second time to
   * produce the return value, executing the operation twice and doubling
   * any side effects. The result is now captured from the timed run.
   */
  async timeOperation<T>(name: string, operation: () => T): Promise<T> {
    const startTime = performance.now();

    // Capture the result produced during the timed invocation so the
    // operation does not need to be re-run afterwards.
    let result!: T;
    const timingInfo = await this.backend.time(() => {
      result = operation();
    });

    const endTime = performance.now();

    // Update statistics. `??` (not `||`) so an existing count of 0 is kept.
    this.operationCounts.set(name, (this.operationCounts.get(name) ?? 0) + 1);
    this.totalTime += timingInfo.kernelMs;

    console.log(`${name}: ${timingInfo.kernelMs}ms (Total: ${endTime - startTime}ms)`);

    return result;
  }

  /** Snapshot of call counts, accumulated kernel time, and backend memory. */
  getStats() {
    return {
      operations: Object.fromEntries(this.operationCounts),
      totalKernelTime: this.totalTime,
      memoryInfo: this.backend.memory()
    };
  }
}

Install with Tessl CLI

npx tessl i tessl/npm-tensorflow--tfjs-backend-cpu

docs

backend-cpu.md

index.md

shared-kernels.md

tile.json