Network, database, and transaction configuration options for fine-tuning performance and behavior. FoundationDB provides extensive configuration options for customizing operation at multiple levels.
Configure network-level settings before opening database connections.
/**
 * Configure FoundationDB client network settings (tracing, TLS,
 * external client libraries).
 *
 * Must be called before opening any database connections; the options
 * apply to the client's single network thread.
 *
 * @param netOpts - Network configuration options (see NetworkOptions)
 */
function configNetwork(netOpts: NetworkOptions): void;
/**
* Network configuration options
*/
interface NetworkOptions {
/** Enable trace output to directory */
trace_enable?: string;
/** Maximum trace file size in bytes */
trace_roll_size?: number;
/** Maximum total size of all trace files combined, in bytes */
trace_max_logs_size?: number;
/** Trace log group identifier */
trace_log_group?: string;
/** TLS certificate file path */
TLS_cert_path?: string;
/** TLS certificate data as bytes */
TLS_cert_bytes?: Buffer;
/** TLS key file path */
TLS_key_path?: string;
/** TLS key data as bytes */
TLS_key_bytes?: Buffer;
/** TLS CA certificate file path */
TLS_ca_path?: string;
/** TLS CA certificate data as bytes */
TLS_ca_bytes?: Buffer;
/** TLS password */
TLS_password?: string;
/** TLS verification pattern */
TLS_verify_peers?: string;
/** Directory for external client libraries */
external_client_directory?: string;
/** External client library path */
external_client?: string;
/** Disable client statistics logging */
disable_client_statistics_logging?: true;
/** Client threads per version */
client_threads_per_version?: number;
/** Disable multi-version client API */
disable_multi_version_client_api?: true;
/** Callbacks on external threads */
callbacks_on_external_threads?: true;
/** Trace format (xml or json) */
trace_format?: string;
/** Trace file identifier */
trace_file_identifier?: string;
/** Knob overrides (advanced) */
knob?: string;
}
Usage Example:
import fdb from "foundationdb";
// Configure network before opening database
fdb.configNetwork({
trace_enable: "./fdb-traces",
trace_roll_size: 10485760, // 10 MB
trace_max_logs_size: 104857600, // 100 MB total
trace_format: "json",
});
fdb.setAPIVersion(620);
const db = fdb.open();
// TLS configuration
fdb.configNetwork({
TLS_cert_path: "/path/to/cert.pem",
TLS_key_path: "/path/to/key.pem",
TLS_ca_path: "/path/to/ca.pem",
TLS_verify_peers: "S.CN=foundationdb.example.com",
});
// External client library
fdb.configNetwork({
external_client_directory: "/usr/local/lib/fdb",
disable_multi_version_client_api: true,
});
Configure database-level settings after opening a connection.
/**
 * Set database-level options on an open database handle.
 * These act as defaults for transactions created from this handle.
 * @param opts - Database configuration options (see DatabaseOptions)
 */
setNativeOptions(opts: DatabaseOptions): void;
/**
* Database configuration options
*/
interface DatabaseOptions {
/** Location cache size in bytes (default: 100000) */
location_cache_size?: number;
/** Maximum number of outstanding watches (default: 10000) */
max_watches?: number;
/** Machine ID for coordination */
machine_id?: string;
/** Datacenter ID for coordination */
datacenter_id?: string;
/** Default transaction timeout in milliseconds */
transaction_timeout?: number;
/** Default transaction retry limit */
transaction_retry_limit?: number;
/** Default transaction size limit in bytes */
transaction_size_limit?: number;
/** Default transaction maximum retry delay in milliseconds */
transaction_max_retry_delay?: number;
/** Enable read-your-writes for snapshot reads (the default behavior) */
snapshot_ryw_enable?: true;
/** Snapshot read-your-writes disable */
snapshot_ryw_disable?: true;
}
Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Configure database
db.setNativeOptions({
transaction_timeout: 10000, // 10 seconds
transaction_retry_limit: 100,
transaction_size_limit: 10000000, // 10 MB
max_watches: 20000,
location_cache_size: 200000,
});
// Machine and datacenter ID
db.setNativeOptions({
machine_id: "web-server-01",
datacenter_id: "us-west",
});
Configure individual transaction behavior.
/**
 * Set an option on a running transaction.
 *
 * Accepts either a numeric TransactionOptionCode or the string name of a
 * TransactionOptions key. `value` is only needed for options that carry a
 * parameter (e.g. timeout, retry_limit, debug identifiers); flag options
 * take no value.
 *
 * @param opt - Transaction option code or option name
 * @param value - Option value, when the option requires one
 */
setOption(
opt: TransactionOptionCode | keyof TransactionOptions,
value?: number | string | Buffer
): void;
/**
* Transaction configuration options
*/
interface TransactionOptions {
/** Timeout in milliseconds */
timeout?: number;
/** Retry limit */
retry_limit?: number;
/** Maximum retry delay in milliseconds */
max_retry_delay?: number;
/** Size limit in bytes */
size_limit?: number;
/** Disable read-your-writes semantics */
read_your_writes_disable?: true;
/** Disable read-ahead caching */
read_ahead_disable?: true;
/** Enable access to system keys (prefix \xFF) */
access_system_keys?: true;
/** Enable read access to system keys */
read_system_keys?: true;
/** System immediate priority (highest) */
priority_system_immediate?: true;
/** Batch priority (lowest) */
priority_batch?: true;
/** Make transaction lock-aware */
lock_aware?: true;
/** Use snapshot reads (non-conflicting) */
snapshot_ryw_enable?: true;
/** Disable snapshot reads */
snapshot_ryw_disable?: true;
/** Disable write conflict checking (causal consistency) */
causal_write_risky?: true;
/** Disable read conflict checking (causal consistency) */
causal_read_risky?: true;
/** Report conflicting keys in error */
report_conflicting_keys?: true;
/** Include port in addresses */
include_port_in_address?: true;
/** Next write has no write conflict range */
next_write_no_write_conflict_range?: true;
/** Debug transaction identifier */
debug_transaction_identifier?: string;
/** Log transaction */
log_transaction?: true;
/** Transaction logging max field length */
transaction_logging_max_field_length?: number;
/** Expensive clear cost estimation */
expensive_clear_cost_estimation_enable?: true;
/** Use GRV cache */
use_grv_cache?: true;
}
/**
* Transaction option codes (for advanced use)
*/
/**
 * Transaction option codes for advanced use with `setOption`.
 *
 * These codes must match the native client's fdb.options specification
 * exactly — they are passed straight through to the C library. The
 * previous table contained duplicate values (505 and 702 were each used
 * twice) and did not correspond to fdb.options; the values below follow
 * the generated option codes in the foundationdb binding.
 */
enum TransactionOptionCode {
CausalWriteRisky = 10,
CausalReadRisky = 20,
CausalReadDisable = 21,
IncludePortInAddress = 23,
NextWriteNoWriteConflictRange = 30,
ReadYourWritesDisable = 51,
ReadAheadDisable = 52,
DurabilityDatacenter = 110,
DurabilityRisky = 120,
DurabilityDevNullIsWebScale = 130,
PrioritySystemImmediate = 200,
PriorityBatch = 201,
InitializeNewDatabase = 300,
AccessSystemKeys = 301,
ReadSystemKeys = 302,
DebugRetryLogging = 401,
TransactionLoggingEnable = 402,
DebugTransactionIdentifier = 403,
LogTransaction = 404,
TransactionLoggingMaxFieldLength = 405,
Timeout = 500,
RetryLimit = 501,
MaxRetryDelay = 502,
SizeLimit = 503,
SnapshotRywEnable = 600,
SnapshotRywDisable = 601,
LockAware = 700,
UsedDuringCommitProtectionDisable = 701,
ReadLockAware = 702,
ReportConflictingKeys = 712,
SpecialKeySpaceRelaxed = 713,
SpecialKeySpaceEnableWrites = 714,
ExpensiveClearCostEstimationEnable = 1000,
UseGrvCache = 1101,
}Usage Example:
import fdb, { TransactionOptionCode } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Configure transaction at creation
await db.doTransaction(
async (tn) => {
// Transaction operations
tn.set("key", "value");
},
{
timeout: 5000,
retry_limit: 50,
priority_batch: true,
}
);
// Configure transaction during execution
await db.doTransaction(async (tn) => {
// Set high priority
tn.setOption(TransactionOptionCode.PrioritySystemImmediate);
// Custom timeout
tn.setOption(TransactionOptionCode.Timeout, 10000);
// Perform operations
tn.set("important", "data");
});
// Access system keys
await db.doTransaction(
async (tn) => {
// Can now read/write keys starting with \xFF
const systemKey = await tn.get("\xFF/coordinators");
},
{
access_system_keys: true,
}
);
// Disable conflict checking (causal consistency)
await db.doTransaction(async (tn) => {
tn.setOption(TransactionOptionCode.CausalWriteRisky);
// This write won't cause conflicts with other transactions
tn.set("cache:item", "data");
});
Configure how range queries fetch data.
/**
* Streaming modes for range queries
*/
enum StreamingMode {
/** Fetch entire range eagerly (best for small ranges) */
WantAll = -2,
/** Default balanced fetching for iterators */
Iterator = -1,
/** Fetch exact number of items specified */
Exact = 0,
/** Small batches (more round trips, less memory) */
Small = 1,
/** Medium batches (balanced) */
Medium = 2,
/** Large batches (fewer round trips, more memory) */
Large = 3,
/** Very large batches (maximum throughput) */
Serial = 4,
}
interface RangeOptions {
/** Streaming mode controlling fetch eagerness */
streamingMode?: StreamingMode;
/** Maximum number of results */
limit?: number;
/** Return results in reverse order */
reverse?: boolean;
/** Target byte size for results */
targetBytes?: number;
}
Usage Example:
import fdb, { StreamingMode } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
await db.doTransaction(async (tn) => {
// Small range: use WantAll
const config = await tn.getRangeAll("config:", "config:~", {
streamingMode: StreamingMode.WantAll,
});
// Large range: use Serial for bulk reads
for await (const batch of tn.getRangeBatch("data:", "data:~", {
streamingMode: StreamingMode.Serial,
})) {
// Process large batches
}
// Memory constrained: use Small
for await (const batch of tn.getRangeBatch("items:", "items:~", {
streamingMode: StreamingMode.Small,
})) {
// Smaller batches, more round trips
}
// Default: Iterator (balanced)
for await (const [key, value] of tn.getRange("user:")) {
// Automatic batch sizing
}
});Set transaction priority levels.
/**
* Transaction priority options
*/
interface TransactionOptions {
/** System immediate priority (highest) */
priority_system_immediate?: true;
/** Batch priority (lowest) */
priority_batch?: true;
// Default priority if neither specified
}Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// High priority for critical operations
await db.doTransaction(
async (tn) => {
// This transaction gets priority
tn.set("critical:data", "important");
},
{
priority_system_immediate: true,
}
);
// Low priority for batch jobs
await db.doTransaction(
async (tn) => {
// Won't interfere with normal operations
for (let i = 0; i < 10000; i++) {
tn.set(`batch:${i}`, `data${i}`);
}
},
{
priority_batch: true,
}
);
// Default priority (most common)
await db.doTransaction(async (tn) => {
tn.set("user:data", "value");
});Configure transaction timeout and retry behavior.
/**
* Timeout and retry options
*/
interface TransactionOptions {
/** Timeout in milliseconds (default: 5000) */
timeout?: number;
/** Retry limit (default: unlimited) */
retry_limit?: number;
/** Maximum retry delay in milliseconds (default: 1000) */
max_retry_delay?: number;
}
interface DatabaseOptions {
/** Default transaction timeout in milliseconds */
transaction_timeout?: number;
/** Default transaction retry limit */
transaction_retry_limit?: number;
/** Default transaction maximum retry delay in milliseconds */
transaction_max_retry_delay?: number;
}Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Set database defaults
db.setNativeOptions({
transaction_timeout: 10000, // 10 seconds
transaction_retry_limit: 100,
transaction_max_retry_delay: 2000, // 2 seconds
});
// Override per transaction
await db.doTransaction(
async (tn) => {
// Long-running operation
for await (const batch of tn.getRangeBatch("data:", "data:~")) {
// Process
}
},
{
timeout: 30000, // 30 seconds
retry_limit: 200,
}
);
// Short timeout for quick operations
await db.doTransaction(
async (tn) => {
const value = await tn.get("config");
},
{
timeout: 1000, // 1 second
}
);Configure transaction size limits.
/**
* Size limit options
*/
interface TransactionOptions {
/** Size limit in bytes (default: 10,000,000) */
size_limit?: number;
}
interface DatabaseOptions {
/** Default transaction size limit in bytes */
transaction_size_limit?: number;
}Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Set database default
db.setNativeOptions({
transaction_size_limit: 100000000, // 100 MB
});
// Per-transaction limit
await db.doTransaction(
async (tn) => {
// Large batch operation
for (let i = 0; i < 100000; i++) {
tn.set(`key:${i}`, `value:${i}`);
}
},
{
size_limit: 50000000, // 50 MB
}
);
// Check transaction size
await db.doTransaction(async (tn) => {
for (let i = 0; i < 10000; i++) {
tn.set(`key:${i}`, `value:${i}`);
}
const size = tn.getApproximateSize();
console.log("Transaction size:", size, "bytes");
if (size > 10000000) {
throw new Error("Transaction too large");
}
});Configure read-your-writes semantics.
/**
* Read-your-writes options
*/
interface TransactionOptions {
/** Disable read-your-writes semantics */
read_your_writes_disable?: true;
/** Enable read-your-writes for snapshot reads (the default) */
snapshot_ryw_enable?: true;
/** Disable snapshot reads */
snapshot_ryw_disable?: true;
}
Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Normal: read-your-writes enabled
await db.doTransaction(async (tn) => {
tn.set("key", "value");
const value = await tn.get("key");
console.log(value?.toString()); // "value"
});
// Disable read-your-writes
await db.doTransaction(
async (tn) => {
tn.set("key", "new value");
// Won't see the write we just did
const value = await tn.get("key");
console.log(value?.toString()); // Old value from database
// Use snapshot for non-conflicting reads
const snapshot = tn.snapshot();
const snapshotValue = await snapshot.get("other_key");
},
{
read_your_writes_disable: true,
}
);Configure conflict detection behavior.
/**
* Conflict management options
*/
interface TransactionOptions {
/** Disable write conflict checking (causal consistency) */
causal_write_risky?: true;
/** Disable read conflict checking (causal consistency) */
causal_read_risky?: true;
/** Next write has no write conflict range */
next_write_no_write_conflict_range?: true;
/** Report conflicting keys in error */
report_conflicting_keys?: true;
}Usage Example:
import fdb, { TransactionOptionCode } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Disable conflicts for cache updates
await db.doTransaction(async (tn) => {
tn.setOption(TransactionOptionCode.CausalWriteRisky);
// These writes won't cause conflicts
tn.set("cache:item1", "data1");
tn.set("cache:item2", "data2");
});
// Disable conflict for specific write
await db.doTransaction(async (tn) => {
// Normal write (conflicts)
tn.set("counter", "1");
// No conflict for this write
tn.setOption(TransactionOptionCode.NextWriteNoWriteConflictRange);
tn.set("log:entry", "logged");
});
// Report conflicting keys
await db.doTransaction(
async (tn) => {
tn.set("key", "value");
},
{
report_conflicting_keys: true,
}
).catch((error) => {
console.log("Conflicting keys:", error);
});Guidelines for effective configuration.
Usage Example:
import fdb, { StreamingMode } from "foundationdb";
fdb.setAPIVersion(620);
// Practice 1: Configure network once at startup
fdb.configNetwork({
trace_enable: "./fdb-traces",
trace_format: "json",
});
const db = fdb.open();
// Practice 2: Set sensible database defaults
db.setNativeOptions({
transaction_timeout: 10000, // 10 seconds
transaction_retry_limit: 100,
max_watches: 20000,
});
// Practice 3: Use appropriate streaming modes
async function querySmallRange() {
return await db.getRangeAll("config:", "config:~", {
streamingMode: StreamingMode.WantAll,
});
}
async function queryLargeRange() {
const items = [];
for await (const batch of db.getRange("data:", "data:~", {
streamingMode: StreamingMode.Serial,
})) {
items.push(batch);
}
return items;
}
// Practice 4: Set priorities appropriately
async function criticalOperation() {
await db.doTransaction(
async (tn) => {
// High priority
tn.set("critical", "data");
},
{ priority_system_immediate: true }
);
}
async function backgroundJob() {
await db.doTransaction(
async (tn) => {
// Low priority
for (let i = 0; i < 10000; i++) {
tn.set(`batch:${i}`, `data${i}`);
}
},
{ priority_batch: true }
);
}
// Practice 5: Monitor transaction sizes
async function largeOperation() {
await db.doTransaction(async (tn) => {
for (let i = 0; i < 10000; i++) {
tn.set(`key:${i}`, `value:${i}`);
// Check size periodically
if (i % 1000 === 0) {
const size = tn.getApproximateSize();
if (size > 5000000) {
console.warn("Transaction getting large:", size);
}
}
}
});
}
// Practice 6: Use appropriate timeouts
async function quickRead() {
await db.doTransaction(
async (tn) => {
return await tn.get("key");
},
{ timeout: 1000 } // 1 second
);
}
async function longOperation() {
await db.doTransaction(
async (tn) => {
// Long-running query
return await tn.getRangeAll("data:", "data:~");
},
{ timeout: 60000 } // 60 seconds
);
);
Properly shutdown network for clean exits.
/**
* Synchronously stop the FDB network thread
* Call during application shutdown
*/
function stopNetworkSync(): void;
Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Use database
await db.set("key", "value");
// Clean shutdown
db.close();
fdb.stopNetworkSync();
// Or with signal handling
process.on("SIGINT", () => {
console.log("Shutting down...");
db.close();
fdb.stopNetworkSync();
process.exit(0);
});
Test FoundationDB operations with Jest's async/await support and lifecycle hooks.
import fdb from "foundationdb";
import { describe, beforeAll, afterAll, beforeEach, test, expect } from "@jest/globals";
describe("FoundationDB Tests", () => {
let db: ReturnType<typeof fdb.open>;
beforeAll(() => {
fdb.setAPIVersion(620);
db = fdb.open();
});
afterAll(() => {
db.close();
fdb.stopNetworkSync();
});
beforeEach(async () => {
// Clear test data
await db.clearRangeStartsWith("test:");
});
test("should perform basic operations", async () => {
await db.set("test:key", "value");
const result = await db.get("test:key");
expect(result?.toString()).toBe("value");
});
test("should handle transactions", async () => {
const count = await db.doTransaction(async (tn) => {
tn.set("test:counter", "5");
const val = await tn.get("test:counter");
return parseInt(val?.toString() || "0");
});
expect(count).toBe(5);
});
test("should handle errors gracefully", async () => {
await expect(async () => {
await db.doTransaction(async (tn) => {
throw new Error("Test error");
});
}).rejects.toThrow("Test error");
});
});
Use Mocha with Promise-based tests and proper cleanup.
import fdb from "foundationdb";
import { describe, before, after, beforeEach, it } from "mocha";
import { expect } from "chai";
describe("FoundationDB Operations", function() {
this.timeout(10000); // 10 second timeout
let db: ReturnType<typeof fdb.open>;
before(() => {
fdb.setAPIVersion(620);
db = fdb.open();
});
after(() => {
db.close();
fdb.stopNetworkSync();
});
beforeEach(async () => {
await db.clearRangeStartsWith("test:");
});
it("should read and write data", async () => {
await db.set("test:mocha", "data");
const value = await db.get("test:mocha");
expect(value?.toString()).to.equal("data");
});
it("should handle concurrent operations", async () => {
const operations = Array.from({ length: 10 }, (_, i) =>
db.set(`test:item:${i}`, `value${i}`)
);
await Promise.all(operations);
const items = await db.getRangeAllStartsWith("test:item:");
expect(items).to.have.lengthOf(10);
});
it("should support atomic operations", async () => {
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await Promise.all([
db.add("test:atomic", delta),
db.add("test:atomic", delta),
db.add("test:atomic", delta)
]);
const result = await db.get("test:atomic");
expect(result?.readBigInt64LE(0)).to.equal(3n);
});
});Leverage Vitest's fast execution and modern testing features.
import fdb from "foundationdb";
import { describe, beforeAll, afterAll, beforeEach, test, expect } from "vitest";
describe("FoundationDB with Vitest", () => {
let db: ReturnType<typeof fdb.open>;
beforeAll(() => {
fdb.setAPIVersion(620);
db = fdb.open();
});
afterAll(() => {
db.close();
fdb.stopNetworkSync();
});
beforeEach(async () => {
await db.clearRangeStartsWith("test:");
});
test("concurrent writes with snapshot isolation", async () => {
const writes = Array.from({ length: 100 }, (_, i) =>
db.set(`test:concurrent:${i}`, `value${i}`)
);
await Promise.all(writes);
const results = await db.getRangeAllStartsWith("test:concurrent:");
expect(results).toHaveLength(100);
});
test("transaction retry logic", async () => {
let attempts = 0;
const result = await db.doTransaction(async (tn) => {
attempts++;
const value = await tn.get("test:retry");
tn.set("test:retry", (parseInt(value?.toString() || "0") + 1).toString());
return attempts;
});
expect(result).toBeGreaterThanOrEqual(1);
});
test("range queries with limits", async () => {
for (let i = 0; i < 50; i++) {
await db.set(`test:range:${i.toString().padStart(3, "0")}`, `value${i}`);
}
const page1 = await db.getRangeAll("test:range:", "test:range:~", { limit: 10 });
const page2 = await db.getRangeAll("test:range:", "test:range:~", {
limit: 10,
reverse: true
});
expect(page1).toHaveLength(10);
expect(page2).toHaveLength(10);
expect(page1[0][0].toString()).not.toBe(page2[0][0].toString());
});
});
Helper functions for common test scenarios.
import fdb, { FDBError } from "foundationdb";
export class FDBTestHelper {
private db: ReturnType<typeof fdb.open>;
private testPrefix: string;
constructor(testPrefix = "test:") {
fdb.setAPIVersion(620);
this.db = fdb.open();
this.testPrefix = testPrefix;
}
getDB() {
return this.db;
}
async cleanup() {
await this.db.clearRangeStartsWith(this.testPrefix);
}
async close() {
this.db.close();
}
async withTransaction<T>(
fn: (tn: any) => Promise<T>,
opts?: any
): Promise<T> {
return await this.db.doTransaction(fn, opts);
}
async expectError(
fn: () => Promise<any>,
errorCode?: number
): Promise<FDBError> {
try {
await fn();
throw new Error("Expected operation to throw");
} catch (error) {
if (error instanceof FDBError) {
if (errorCode !== undefined && error.code !== errorCode) {
throw new Error(`Expected error code ${errorCode}, got ${error.code}`);
}
return error;
}
throw error;
}
}
async populateTestData(count: number, prefix?: string) {
const pfx = prefix || this.testPrefix;
const operations = Array.from({ length: count }, (_, i) =>
this.db.set(`${pfx}${i}`, `value${i}`)
);
await Promise.all(operations);
}
async assertKeyExists(key: string): Promise<Buffer> {
const value = await this.db.get(key);
if (value === undefined) {
throw new Error(`Expected key "${key}" to exist`);
}
return value;
}
async assertKeyNotExists(key: string): Promise<void> {
const value = await this.db.get(key);
if (value !== undefined) {
throw new Error(`Expected key "${key}" to not exist`);
}
}
}
// Usage in tests
import { describe, beforeAll, afterAll, beforeEach, test, expect } from "vitest";
describe("Using FDBTestHelper", () => {
let helper: FDBTestHelper;
beforeAll(() => {
helper = new FDBTestHelper("test:helper:");
});
afterAll(async () => {
await helper.cleanup();
await helper.close();
});
beforeEach(async () => {
await helper.cleanup();
});
test("populate and verify test data", async () => {
await helper.populateTestData(10);
await helper.assertKeyExists("test:helper:0");
await helper.assertKeyExists("test:helper:9");
await helper.assertKeyNotExists("test:helper:10");
});
test("transaction helper", async () => {
const result = await helper.withTransaction(async (tn) => {
tn.set("test:helper:tx", "value");
return "success";
});
expect(result).toBe("success");
});
});Manage database connections efficiently across your application.
import fdb from "foundationdb";
/**
 * Process-wide singleton owning the application's FoundationDB connection.
 *
 * One shared database handle is opened in initialize() and reused by every
 * module (via getDatabase()); shutdown() closes it and stops the client's
 * network thread.
 */
class FDBConnectionPool {
// The lone instance, lazily created by getInstance().
private static instance: FDBConnectionPool;
// Open handle, or null before initialize() / after shutdown().
private db: ReturnType<typeof fdb.open> | null = null;
private initialized = false;
// Private: force access through getInstance().
private constructor() {}
/** Lazily create and return the singleton. */
static getInstance(): FDBConnectionPool {
if (!FDBConnectionPool.instance) {
FDBConnectionPool.instance = new FDBConnectionPool();
}
return FDBConnectionPool.instance;
}
/**
 * Open the connection and apply database defaults. Idempotent once it
 * has succeeded (subsequent calls return immediately).
 *
 * Order matters here: configNetwork must run before fdb.open(), since
 * network options only take effect before the first connection opens.
 *
 * NOTE(review): not safe against *concurrent* first calls — two callers
 * can both pass the `initialized` check and run the body twice; confirm
 * whether startup code can race here.
 */
async initialize(opts?: { clusterFile?: string; trace?: string }) {
if (this.initialized) return;
try {
fdb.setAPIVersion(620);
// Optional trace logging, configured before the network starts.
if (opts?.trace) {
fdb.configNetwork({
trace_enable: opts.trace,
trace_format: "json",
});
}
this.db = fdb.open(opts?.clusterFile);
// Defaults inherited by every transaction on this handle.
this.db.setNativeOptions({
transaction_timeout: 10000,
transaction_retry_limit: 100,
max_watches: 20000,
});
this.initialized = true;
} catch (error) {
console.error("Failed to initialize FDB connection:", error);
throw error;
}
}
/** The shared handle; throws if initialize() has not completed. */
getDatabase(): ReturnType<typeof fdb.open> {
if (!this.db) {
throw new Error("Database not initialized. Call initialize() first.");
}
return this.db;
}
/** Close the handle and stop the network thread (for clean process exit). */
async shutdown() {
if (this.db) {
this.db.close();
fdb.stopNetworkSync();
this.db = null;
this.initialized = false;
}
}
/** Whether initialize() has completed successfully. */
isInitialized(): boolean {
return this.initialized;
}
}
// Usage across application
export const fdbPool = FDBConnectionPool.getInstance();
// In application startup
await fdbPool.initialize({ trace: "./fdb-traces" });
// In any module
import { fdbPool } from "./fdb-pool";
async function getData(key: string) {
const db = fdbPool.getDatabase();
return await db.get(key);
}
// Graceful shutdown
process.on("SIGTERM", async () => {
await fdbPool.shutdown();
process.exit(0);
});
Implement robust retry logic for transient failures.
import fdb, { FDBError } from "foundationdb";
/**
 * Run `operation`, retrying failures with exponential backoff and jitter.
 *
 * Identical behavior to the original helper: up to `maxRetries + 1` total
 * attempts, delay doubling from `baseDelay` with up to 10% random jitter
 * added, and an immediate rethrow for FDBError code 1007, which this
 * helper treats as non-retryable.
 *
 * @param operation - Async operation to attempt
 * @param maxRetries - Retries allowed after the first attempt (default 5)
 * @param baseDelay - Initial backoff delay in milliseconds (default 100)
 * @returns The first successful result of `operation`
 */
async function withRetry<T>(
  operation: () => Promise<T>,
  maxRetries = 5,
  baseDelay = 100
): Promise<T> {
  let mostRecentError: Error | undefined;
  let attempt = 0;
  while (attempt <= maxRetries) {
    try {
      return await operation();
    } catch (err) {
      mostRecentError = err as Error;
      // Treated as non-retryable: bail out immediately.
      if (err instanceof FDBError && err.code === 1007) {
        throw err;
      }
      if (attempt < maxRetries) {
        const backoff = baseDelay * 2 ** attempt;
        const jitter = Math.random() * backoff * 0.1;
        await new Promise<void>((resolve) => setTimeout(resolve, backoff + jitter));
      }
    }
    attempt++;
  }
  throw new Error(`Operation failed after ${maxRetries} retries: ${mostRecentError?.message}`);
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const result = await withRetry(async () => {
return await db.doTransaction(async (tn) => {
const value = await tn.get("critical:data");
tn.set("critical:data", "updated");
return value;
});
});
Protect against cascading failures with circuit breaker.
import fdb, { FDBError } from "foundationdb";
/**
 * Circuit breaker guarding an async operation against cascading failures.
 *
 * States: "closed" (normal), "open" (rejecting all calls), "half-open"
 * (probing again once `resetTimeout` ms have elapsed since the last
 * failure). After `threshold` consecutive failures the breaker opens; a
 * single success closes it and clears the failure count.
 *
 * NOTE(review): the `timeout` constructor parameter is stored but never
 * read — this matches the original implementation; kept for interface
 * compatibility.
 */
class CircuitBreaker {
  private failures = 0;
  private lastFailureTime = 0;
  private state: "closed" | "open" | "half-open" = "closed";

  constructor(
    private threshold = 5,
    private timeout = 60000,
    private resetTimeout = 30000
  ) {}

  /** Run `operation` through the breaker, recording success or failure. */
  async execute<T>(operation: () => Promise<T>): Promise<T> {
    if (this.state === "open") {
      const elapsed = Date.now() - this.lastFailureTime;
      if (elapsed <= this.resetTimeout) {
        throw new Error("Circuit breaker is OPEN");
      }
      // Enough time has passed: allow one probing call through.
      this.state = "half-open";
    }
    try {
      const value = await operation();
      this.recordSuccess();
      return value;
    } catch (err) {
      this.recordFailure();
      throw err;
    }
  }

  /** A success closes the breaker and resets the failure count. */
  private recordSuccess() {
    this.failures = 0;
    this.state = "closed";
  }

  /** A failure bumps the count; at `threshold` the breaker opens. */
  private recordFailure() {
    this.failures++;
    this.lastFailureTime = Date.now();
    if (this.failures >= this.threshold) {
      this.state = "open";
    }
  }

  /** Current breaker state, for monitoring. */
  getState() {
    return this.state;
  }
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const breaker = new CircuitBreaker(5, 60000, 30000);
async function safeQuery(key: string): Promise<Buffer | undefined> {
return await breaker.execute(async () => {
return await db.get(key);
});
}
Process large datasets efficiently with automatic chunking.
import fdb from "foundationdb";
/**
 * Read every key-value pair under `prefix` in fixed-size chunks, feeding
 * each chunk through `processor` and collecting the results.
 *
 * Pagination: after each full chunk the scan resumes just past the last
 * key seen, by appending "\x00" (the smallest possible key suffix). The
 * end of the range is `prefix + "~"`, matching the convention used by the
 * surrounding examples.
 *
 * @param db - Open FoundationDB database handle
 * @param prefix - Key prefix delimiting the range to scan
 * @param processor - Async transform applied to each chunk
 * @param chunkSize - Maximum entries fetched per round trip (default 100)
 * @returns Concatenated results of every processor invocation
 */
async function processBatchInChunks<T>(
  db: ReturnType<typeof fdb.open>,
  prefix: string,
  processor: (batch: Array<[Buffer, Buffer]>) => Promise<T[]>,
  chunkSize = 100
): Promise<T[]> {
  const collected: T[] = [];
  const endKey = prefix + "~";
  let cursor = prefix;
  for (;;) {
    const page = await db.getRangeAll(cursor, endKey, { limit: chunkSize });
    if (page.length === 0) {
      break;
    }
    collected.push(...(await processor(page)));
    if (page.length < chunkSize) {
      // Short page: nothing left to fetch.
      break;
    }
    // Resume immediately after the last key in this page.
    const lastKey = page[page.length - 1][0];
    cursor = lastKey.toString() + "\x00";
  }
  return collected;
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const processed = await processBatchInChunks(
db,
"users:",
async (batch) => {
return batch.map(([key, value]) => ({
key: key.toString(),
parsed: JSON.parse(value.toString()),
}));
},
50
);
Implement application-level caching with FDB watches.
import fdb from "foundationdb";
/**
 * Read-through cache backed by FoundationDB watches.
 *
 * `get` returns a cached value when present; otherwise it reads the key
 * with `getAndWatch`, caches the parsed value, and evicts the entry when
 * the watch fires (i.e. the key changed in the database). `set` writes
 * through and evicts immediately.
 *
 * NOTE(review): the watch promise has no rejection handler — matching the
 * original — so a failed watch leaves the entry cached until set()/clear().
 */
class FDBCache<T> {
  private cache = new Map<string, { value: T; watch: any }>();

  constructor(private db: ReturnType<typeof fdb.open>) {}

  /** Get `key` via the cache; `undefined` when the key is absent. */
  async get(key: string, parser: (buf: Buffer) => T): Promise<T | undefined> {
    const hit = this.cache.get(key);
    if (hit !== undefined) {
      return hit.value;
    }
    const watch = await this.db.getAndWatch(key);
    if (watch.value === undefined) {
      return undefined;
    }
    const parsed = parser(watch.value);
    this.cache.set(key, { value: parsed, watch });
    // Evict as soon as the database reports a change to this key.
    watch.promise.then(() => {
      this.cache.delete(key);
    });
    return parsed;
  }

  /** Write through to the database and drop any cached copy. */
  async set(key: string, value: T, serializer: (val: T) => Buffer) {
    await this.db.set(key, serializer(value));
    this.cache.delete(key);
  }

  /** Cancel all outstanding watches and empty the cache. */
  clear() {
    for (const { watch } of this.cache.values()) {
      watch.cancel();
    }
    this.cache.clear();
  }
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const cache = new FDBCache(db);
const user = await cache.get(
"user:123",
(buf) => JSON.parse(buf.toString())
);
await cache.set(
"user:123",
{ name: "Alice", email: "alice@example.com" },
(val) => Buffer.from(JSON.stringify(val))
);
Implement distributed locking for coordination.
import fdb from "foundationdb";
/**
 * Best-effort distributed lock stored under `locks:<name>`.
 *
 * The lock value encodes `<timestamp>-<random>`; a holder is considered
 * stale once `ttl` milliseconds have passed since that timestamp, at
 * which point another caller may take the lock over. `release` only
 * clears the key while it still contains this instance's value.
 *
 * NOTE(review): expiry relies on loosely synchronized wall clocks across
 * processes — fine for coordination, not for strict mutual exclusion.
 */
class DistributedLock {
  private lockKey: string;
  private lockValue: string;

  constructor(
    private db: ReturnType<typeof fdb.open>,
    lockName: string,
    private ttl = 30000
  ) {
    this.lockKey = `locks:${lockName}`;
    this.lockValue = `${Date.now()}-${Math.random()}`;
  }

  /**
   * Try to take the lock, polling every 100 ms until `timeout` elapses.
   * @returns true when acquired, false on timeout.
   */
  async acquire(timeout = 10000): Promise<boolean> {
    const deadline = Date.now() + timeout;
    while (Date.now() < deadline) {
      try {
        const gotIt = await this.db.doTransaction(async (tn) => {
          const current = await tn.get(this.lockKey);
          if (current === undefined) {
            // Nobody holds the lock: claim it.
            tn.set(this.lockKey, this.lockValue);
            return true;
          }
          // Take over if the holder's timestamp is older than ttl.
          const heldSince = parseInt(current.toString().split("-")[0]);
          if (Date.now() - heldSince > this.ttl) {
            tn.set(this.lockKey, this.lockValue);
            return true;
          }
          return false;
        });
        if (gotIt) {
          return true;
        }
      } catch (error) {
        // Transaction conflict — fall through and poll again.
      }
      // Wait before the next attempt.
      await new Promise((resolve) => setTimeout(resolve, 100));
    }
    return false;
  }

  /** Release the lock, but only if this instance still owns it. */
  async release(): Promise<void> {
    await this.db.doTransaction(async (tn) => {
      const current = await tn.get(this.lockKey);
      if (current?.toString() === this.lockValue) {
        tn.clear(this.lockKey);
      }
    });
  }

  /** Run `fn` while holding the lock; the lock is always released after. */
  async withLock<T>(fn: () => Promise<T>, timeout = 10000): Promise<T> {
    if (!(await this.acquire(timeout))) {
      throw new Error(`Failed to acquire lock: ${this.lockKey}`);
    }
    try {
      return await fn();
    } finally {
      await this.release();
    }
  }
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const lock = new DistributedLock(db, "resource:123", 30000);
await lock.withLock(async () => {
// Critical section - only one process can execute this at a time
const value = await db.get("shared:resource");
await db.set("shared:resource", "updated");
});
Implement event sourcing with ordered event storage.
import fdb, { tuple } from "foundationdb";
interface Event {
type: string;
data: any;
timestamp: number;
version?: Buffer;
}
/**
 * Append-only event store. Events are keyed by the tuple
 * ["events", streamId, <versionstamp>], so the database assigns each
 * append a unique, monotonically increasing position at commit time.
 */
class EventStore {
  private db: ReturnType<typeof fdb.open>;

  constructor() {
    fdb.setAPIVersion(620);
    // Tuple keys + JSON values to match the key layout used below.
    this.db = fdb.open()
      .withKeyEncoding(fdb.encoders.tuple)
      .withValueEncoding(fdb.encoders.json);
  }

  /**
   * Append one event to `streamId`; resolves with the versionstamp the
   * database assigned to it.
   *
   * Fix over the original: a versionstamp's promise only resolves after
   * the transaction commits, so returning `versionstamp.promise` from
   * inside the doTransaction callback made the commit wait on the
   * callback while the callback waited on the commit — a deadlock. The
   * versionstamp handle is now returned from the callback and its promise
   * awaited after doTransaction has committed.
   */
  async appendEvent(
    streamId: string,
    eventType: string,
    data: any
  ): Promise<Buffer> {
    const stamp = await this.db.doTransaction(async (tn) => {
      const key = ["events", streamId, tuple.unboundVersionstamp()];
      const event: Event = {
        type: eventType,
        data,
        timestamp: Date.now(),
      };
      tn.setVersionstampedKey(key, event);
      // Resolves only after commit; must be awaited outside the callback.
      return tn.getVersionstamp();
    });
    return stamp.promise;
  }

  /**
   * Read a stream's events in commit order.
   * @param fromVersion - Start position (inclusive — NOTE(review): a
   * caller resuming from a previous page will see the boundary event
   * again; confirm intended semantics).
   * @param limit - Maximum number of events to return.
   */
  async getEvents(
    streamId: string,
    fromVersion?: Buffer,
    limit?: number
  ): Promise<Event[]> {
    const start = fromVersion
      ? ["events", streamId, fromVersion]
      : ["events", streamId];
    // A 0xff byte sorts after any versionstamp, closing the range.
    const events = await this.db.getRangeAll(
      start,
      ["events", streamId, Buffer.from([0xff])],
      { limit }
    );
    return events.map(([key, value]) => ({
      ...(value as any),
      version: key[2] as Buffer,
    }));
  }

  /** Replay every event in the stream through `handler`, in order. */
  async replay(
    streamId: string,
    handler: (event: Event) => void | Promise<void>
  ): Promise<void> {
    const events = await this.getEvents(streamId);
    for (const event of events) {
      await handler(event);
    }
  }

  /** Rebuild the stream's current state by folding over all its events. */
  async getSnapshot(streamId: string): Promise<any> {
    const events = await this.getEvents(streamId);
    let state: any = {};
    for (const event of events) {
      state = this.applyEvent(state, event);
    }
    return state;
  }

  /** Reducer mapping one event onto the accumulated state. */
  private applyEvent(state: any, event: Event): any {
    switch (event.type) {
      case "created":
        return { ...event.data, created: true };
      case "updated":
        return { ...state, ...event.data };
      case "deleted":
        return { ...state, deleted: true };
      default:
        return state;
    }
  }
}
// Usage
const store = new EventStore();
// Append events
await store.appendEvent("order:123", "OrderCreated", {
items: ["item1", "item2"],
total: 100,
});
await store.appendEvent("order:123", "ItemAdded", {
item: "item3",
});
await store.appendEvent("order:123", "OrderConfirmed", {
confirmedAt: Date.now(),
});
// Replay events
await store.replay("order:123", (event) => {
console.log(`Event: ${event.type}`, event.data);
});
// Get current snapshot
const currentState = await store.getSnapshot("order:123");
console.log("Current state:", currentState);Understanding and handling common FoundationDB error codes.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
/**
 * Demonstrates handling of the most common FoundationDB error codes.
 * Codes follow the official FDB error-code table:
 *   1007 transaction_too_old, 1009 future_version, 1020 not_committed,
 *   1021 commit_unknown_result, 1025 transaction_cancelled,
 *   1031 transaction_timed_out, 2101 transaction_too_large.
 * (The original mislabeled 1020/1021/1025 and used 2017 for "too large".)
 */
async function handleFDBErrors() {
try {
await db.doTransaction(async (tn) => {
tn.set("key", "value");
});
} catch (error) {
if (error instanceof FDBError) {
switch (error.code) {
case 1007: // transaction_too_old
console.error("Transaction took too long - increase timeout");
break;
case 1009: // future_version - request for a future read version
console.error("Clock skew detected - check system time");
break;
case 1020: // not_committed - conflict with another transaction
console.error("Transaction conflicted - doTransaction normally retries this");
break;
case 1021: // commit_unknown_result - transaction may have succeeded
console.error("Commit status unknown - check if data was written");
break;
case 1025: // transaction_cancelled
console.error("Transaction was cancelled");
break;
case 1031: // transaction_timed_out
console.error("Operation exceeded timeout limit");
break;
case 2101: // transaction_too_large
console.error("Transaction size exceeds limit - split into smaller transactions");
break;
default:
console.error(`FDB Error ${error.code}: ${error.message}`);
}
} else {
console.error("Non-FDB error:", error);
}
}
}
Handle transaction conflicts with proper retry logic.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
/**
 * Manual conflict-retry loop (illustration only — db.doTransaction already
 * retries conflicts internally).
 * Fix over the original: a read/write conflict surfaces as error 1020
 * (not_committed); 1007 is transaction_too_old, a different failure.
 */
async function handleConflicts() {
let attempts = 0;
const maxAttempts = 5;
while (attempts < maxAttempts) {
try {
const result = await db.doTransaction(async (tn) => {
// Counts every body execution, including internal retries.
attempts++;
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
// Simulate some processing
await new Promise(resolve => setTimeout(resolve, 10));
tn.set("counter", (count + 1).toString());
return count + 1;
});
console.log(`Success after ${attempts} attempts:`, result);
return result;
} catch (error) {
// 1020 = not_committed: the transaction conflicted with another one.
if (error instanceof FDBError && error.code === 1020) {
console.log(`Attempt ${attempts} failed with conflict, retrying...`);
if (attempts >= maxAttempts) {
throw new Error(`Failed after ${maxAttempts} attempts`);
}
// doTransaction handles retry automatically, but showing manual retry for illustration
} else {
throw error;
}
}
}
}
Handle and prevent timeout errors effectively.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
/**
 * Shows how to detect a transaction timeout.
 * Fix over the original: when the `timeout` option fires, FDB raises
 * error 1031 (transaction_timed_out); 1025 is transaction_cancelled.
 */
async function handleTimeouts() {
try {
await db.doTransaction(async (tn) => {
// Long-running operation: stream the range in batches.
for await (const batch of tn.getRangeBatch("data:", "data:~")) {
// Process batch
await processBatch(batch);
}
}, {
timeout: 30000, // 30 second timeout
});
} catch (error) {
if (error instanceof FDBError && error.code === 1031) {
console.error("Transaction timed out");
// Strategies to fix:
// 1. Increase timeout
// 2. Split into smaller transactions
// 3. Use snapshot reads where possible
// 4. Optimize query performance
}
// Always surface the failure to the caller after logging.
throw error;
}
}
async function processBatch(batch: any) {
// Batch processing logic
}Handle network-related errors and connectivity issues.
import fdb, { FDBError } from "foundationdb";
/**
 * Illustrates catching connectivity-related failures when opening and
 * using a database from an explicit cluster-file path.
 *
 * NOTE(review): the error-code mappings below look suspect — in the
 * official FDB error table 1031 is transaction_timed_out and 1032 is
 * too_many_watches, not connection errors. Verify these codes against
 * the FoundationDB error-code reference before relying on them.
 */
async function handleNetworkErrors() {
fdb.setAPIVersion(620);
try {
const db = fdb.open("/path/to/fdb.cluster");
await db.doTransaction(async (tn) => {
tn.set("key", "value");
});
} catch (error) {
if (error instanceof FDBError) {
if (error.code === 1031) {
console.error("Cannot connect to cluster - check network and cluster file");
} else if (error.code === 1032) {
console.error("Cluster file invalid or corrupted");
} else if (error.code === 2501) {
console.error("No coordinators available - check FDB cluster health");
}
}
// Re-throw after logging so callers still see the failure.
throw error;
}
}Handle directory-specific errors.
import fdb, { directory, DirectoryError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
/**
 * Demonstrates recovering from directory-layer errors.
 * Fix over the original: after a successful recovery (opening an existing
 * directory or creating a missing one) we return instead of falling
 * through and re-throwing the error we just recovered from.
 */
async function handleDirectoryErrors() {
try {
// Try to create existing directory
const dir = await directory.create(db, ["myapp", "users"]);
} catch (error) {
if (error instanceof DirectoryError) {
console.error("Directory operation failed:", error.message);
// Check specific error messages
if (error.message.includes("already exists")) {
console.log("Directory exists, opening instead");
const dir = await directory.open(db, ["myapp", "users"]);
return; // recovered — do not re-throw
} else if (error.message.includes("does not exist")) {
console.log("Directory missing, creating");
const dir = await directory.create(db, ["myapp", "users"]);
return; // recovered — do not re-throw
} else if (error.message.includes("layer mismatch")) {
console.error("Directory layer type mismatch");
}
}
throw error;
}
}
Implement graceful error recovery patterns.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
/**
 * Run `operation`, returning `fallback` for transient FDB failures
 * (1025 transaction_cancelled, 1007 transaction_too_old) and re-throwing
 * every other error unchanged.
 */
async function safeOperation<T>(
operation: () => Promise<T>,
fallback?: T
): Promise<T | undefined> {
try {
return await operation();
} catch (error) {
if (error instanceof FDBError) {
console.error(`FDB Error ${error.code}:`, error.message);
// Error codes we treat as transient and recoverable.
const transientCodes = [1025, 1007];
if (transientCodes.includes(error.code)) {
console.log("Using fallback value due to transient error");
return fallback;
}
}
// Re-throw non-recoverable errors
throw error;
}
}
// Usage
const value = await safeOperation(
async () => await db.get("config:setting"),
Buffer.from("default-value")
);
// With retry wrapper
/**
 * Retry `operation` with exponential backoff (100ms, 200ms, 400ms, ...).
 * Fatal FDB errors (1031, 1032, 2501) are re-thrown immediately.
 * Fix over the original: no pointless backoff sleep after the final
 * failed attempt — the exhaustion error is reported straight away.
 * @throws the original error for fatal codes; otherwise an Error
 *         summarizing the last failure after `maxRetries` attempts.
 */
async function withGracefulRetry<T>(
operation: () => Promise<T>,
maxRetries = 3
): Promise<T> {
let lastError: Error | undefined;
for (let i = 0; i < maxRetries; i++) {
try {
return await operation();
} catch (error) {
lastError = error as Error;
if (error instanceof FDBError) {
// Don't retry fatal errors
if ([1031, 1032, 2501].includes(error.code)) {
throw error;
}
}
// Last attempt: fail fast instead of sleeping one more time.
if (i === maxRetries - 1) break;
// Exponential backoff
await new Promise(resolve =>
setTimeout(resolve, Math.pow(2, i) * 100)
);
}
}
throw new Error(`Failed after ${maxRetries} retries: ${lastError!.message}`);
}
Diagnose and fix common performance problems.
Issue: Slow Transactions
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Problem: Large transaction with many operations
async function slowTransaction() {
await db.doTransaction(async (tn) => {
// Thousands of operations in single transaction
for (let i = 0; i < 10000; i++) {
tn.set(`key:${i}`, `value:${i}`);
}
});
}
// Solution: Split into smaller transactions
/**
 * Write 10,000 keys in batches of 100, one transaction per batch, so each
 * individual transaction stays small and fast.
 */
async function fastTransactions() {
const batchSize = 100;
const total = 10000;
for (let start = 0; start < total; start += batchSize) {
// Clamp the final batch so we never write past `total`.
const end = Math.min(start + batchSize, total);
await db.doTransaction(async (tn) => {
for (let j = start; j < end; j++) {
tn.set(`key:${j}`, `value:${j}`);
}
});
}
}
// Solution: Use appropriate transaction options
await db.doTransaction(async (tn) => {
// Process data
}, {
timeout: 30000,
size_limit: 50000000,
});Issue: High Conflict Rate
// Problem: Many transactions competing for same keys
async function highConflict() {
await Promise.all(
Array.from({ length: 100 }, () =>
db.doTransaction(async (tn) => {
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
tn.set("counter", (count + 1).toString());
})
)
);
}
// Solution: Use atomic operations
async function lowConflict() {
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await Promise.all(
Array.from({ length: 100 }, () =>
db.add("counter", delta)
)
);
}
// Solution: Shard hot keys
/**
 * Spreads increments of a hot counter across `shards` keys so concurrent
 * writers rarely touch the same key; reads sum every shard.
 */
class ShardedCounter {
// NOTE(review): `typeof db` here resolves to an outer `db` binding, not
// this parameter — confirm a suitable `db` is in scope at this point.
constructor(private db: typeof db, private shards = 10) {}
/** Atomically add 1 to a randomly chosen shard (conflict-free ADD mutation). */
async increment() {
const shard = Math.floor(Math.random() * this.shards);
// 64-bit little-endian operand for the atomic add.
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await this.db.add(`counter:${shard}`, delta);
}
/**
 * Sum all shard values. Each get is a separate read, so the total may be
 * approximate while increments are happening concurrently.
 */
async getTotal(): Promise<number> {
let total = 0;
for (let i = 0; i < this.shards; i++) {
const value = await this.db.get(`counter:${i}`);
if (value) {
total += Number(value.readBigInt64LE(0));
}
}
return total;
}
}Issue: Memory Usage
// Problem: Loading too much data at once
async function highMemory() {
const allData = await db.getRangeAll("data:", "data:~");
// Process huge dataset - may cause OOM
}
// Solution: Use streaming with batches
async function lowMemory() {
for await (const batch of db.getRangeBatch("data:", "data:~")) {
// Process one batch at a time
await processBatch(batch);
}
}
// Solution: Use appropriate streaming mode
import { StreamingMode } from "foundationdb";
async function optimizedStreaming() {
for await (const batch of db.getRangeBatch("data:", "data:~", {
streamingMode: StreamingMode.Small, // Smaller batches
})) {
await processBatch(batch);
}
}
async function processBatch(batch: any) {
// Process batch
}Diagnose and resolve connection issues.
Issue: Cannot Connect to Cluster
import fdb from "foundationdb";
// Check 1: Verify cluster file
console.log("Checking cluster file...");
try {
const clusterFile = "/etc/foundationdb/fdb.cluster";
const fs = require("fs");
const content = fs.readFileSync(clusterFile, "utf8");
console.log("Cluster file content:", content);
} catch (error) {
console.error("Cannot read cluster file:", error);
}
// Check 2: Test connection
fdb.setAPIVersion(620);
try {
const db = fdb.open();
await db.get("test");
console.log("Connection successful");
} catch (error) {
console.error("Connection failed:", error);
}
// Check 3: Verify network configuration
fdb.configNetwork({
trace_enable: "./fdb-traces",
trace_format: "json",
});
// Check traces for connection errors
Issue: Transaction Timeouts
// Diagnostic: Check transaction size
await db.doTransaction(async (tn) => {
// Perform operations
const size = tn.getApproximateSize();
console.log("Transaction size:", size);
if (size > 5000000) {
console.warn("Transaction size large, may timeout");
}
});
// Solution: Increase timeout or split transaction
db.setNativeOptions({
transaction_timeout: 30000, // 30 seconds
});
// Or per-transaction
await db.doTransaction(async (tn) => {
// Operations
}, {
timeout: 60000, // 60 seconds
});Issue: Watch Not Triggering
// Problem: Watch created outside transaction
// const watch = db.watch("key"); // ERROR
// Solution: Create watch in transaction
const watch = await db.doTransaction(async (tn) => {
return tn.watch("key");
});
// Problem: Awaiting watch inside transaction
await db.doTransaction(async (tn) => {
const watch = tn.watch("key");
// await watch.promise; // DEADLOCK!
return watch;
}).then(async (watch) => {
await watch.promise; // Correct
});
// Problem: Too many watches
db.setNativeOptions({
max_watches: 20000, // Increase limit
});Prevent and diagnose data integrity problems.
Issue: Encoding Mismatch
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Problem: Writing with one encoder, reading with another
await db.withValueEncoding(fdb.encoders.json)
.set("key", { value: 123 });
const wrong = await db.withValueEncoding(fdb.encoders.string)
.get("key"); // Wrong encoding!
// Solution: Consistent encoder usage
const jsonDb = db.withValueEncoding(fdb.encoders.json);
await jsonDb.set("key", { value: 123 });
const correct = await jsonDb.get("key"); // { value: 123 }
// Solution: Document encoding choices
/**
 * Example of pinning encoders in one place: every UserStore operation goes
 * through a handle scoped to the "users:" prefix with string keys and JSON
 * values, so readers and writers can never disagree on encoding.
 */
class UserStore {
private db: ReturnType<typeof fdb.open>;
constructor(db: ReturnType<typeof fdb.open>) {
// Scope to the "users:" prefix and fix both encodings up front.
this.db = db
.at("users:")
.withKeyEncoding(fdb.encoders.string)
.withValueEncoding(fdb.encoders.json);
}
/** Store `user` (JSON-encoded) under the given id. */
async save(id: string, user: any) {
await this.db.set(id, user);
}
/** Load and JSON-decode the user for `id` (undefined when absent). */
async load(id: string) {
return await this.db.get(id);
}
}Issue: Lost Updates
// Problem: Not using transactions properly
async function lostUpdate() {
const value = await db.get("counter");
const count = parseInt(value?.toString() || "0");
// Another process might update here!
await db.set("counter", (count + 1).toString());
}
// Solution: Use transactions
async function safeUpdate() {
await db.doTransaction(async (tn) => {
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
tn.set("counter", (count + 1).toString());
});
}
// Better: Use atomic operations
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await db.add("counter", delta);Issue: Directory Conflicts
import fdb, { directory } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Problem: Racing directory creation
async function racingCreation() {
try {
const dir = await directory.create(db, ["myapp", "tenant"]);
} catch (error) {
// Second process fails
}
}
// Solution: Use createOrOpen
async function safeCreation() {
const dir = await directory.createOrOpen(db, ["myapp", "tenant"]);
// Works for all processes
}
// Solution: Handle errors gracefully
async function robustCreation() {
try {
const dir = await directory.create(db, ["myapp", "tenant"]);
} catch (error) {
if (error.message.includes("already exists")) {
const dir = await directory.open(db, ["myapp", "tenant"]);
return dir;
}
throw error;
}
}Tools and techniques for debugging issues.
Enable Tracing
import fdb from "foundationdb";
// Enable detailed tracing
fdb.configNetwork({
trace_enable: "./fdb-traces",
trace_format: "json",
trace_log_group: "myapp",
});
fdb.setAPIVersion(620);
const db = fdb.open();
// Check traces at ./fdb-traces/*.json
// Look for: errors, warnings, slow_task events
Transaction Debugging
import fdb, { TransactionOptionCode } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
await db.doTransaction(async (tn) => {
// Enable transaction logging
tn.setOption(TransactionOptionCode.DebugTransactionIdentifier, "my-tx-123");
tn.setOption(TransactionOptionCode.LogTransaction);
// Perform operations
tn.set("key", "value");
}, {
debug_transaction_identifier: "test-transaction",
log_transaction: true,
});
// Check traces for transaction details
Performance Profiling
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function profileOperation() {
const start = Date.now();
await db.doTransaction(async (tn) => {
const opStart = Date.now();
const value = await tn.get("key");
console.log(`Get took ${Date.now() - opStart}ms`);
const setStart = Date.now();
tn.set("key", "value");
console.log(`Set took ${Date.now() - setStart}ms`);
});
console.log(`Total transaction: ${Date.now() - start}ms`);
}
// Monitor transaction sizes
await db.doTransaction(async (tn) => {
for (let i = 0; i < 1000; i++) {
tn.set(`key:${i}`, `value:${i}`);
if (i % 100 === 0) {
console.log(`Size at ${i}:`, tn.getApproximateSize());
}
}
});1. Keep Transactions Short
Minimize transaction duration to reduce conflicts and avoid timeouts.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Bad: Long-running computation in transaction
await db.doTransaction(async (tn) => {
const data = await tn.get("data");
// Expensive computation
const result = await expensiveComputation(data);
tn.set("result", result);
});
// Good: Compute outside transaction
const data = await db.get("data");
const result = await expensiveComputation(data);
await db.doTransaction(async (tn) => {
tn.set("result", result);
});2. Use Atomic Operations for Counters
Avoid read-modify-write patterns for counters.
// Bad: Read-modify-write
await db.doTransaction(async (tn) => {
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
tn.set("counter", (count + 1).toString());
});
// Good: Atomic add
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await db.add("counter", delta);3. Use Snapshot Reads When Possible
Reduce conflicts by using snapshot reads for non-critical data.
await db.doTransaction(async (tn) => {
// Critical read (causes conflicts)
const critical = await tn.get("critical:data");
// Non-critical read (no conflicts)
const metadata = await tn.snapshot().get("metadata");
// Write based on critical data
tn.set("result", processData(critical));
});4. Batch Related Operations
Group related operations in single transactions.
// Bad: Multiple transactions
await db.set("user:alice:name", "Alice");
await db.set("user:alice:email", "alice@example.com");
await db.set("user:alice:age", "30");
// Good: Single transaction
await db.doTransaction(async (tn) => {
tn.set("user:alice:name", "Alice");
tn.set("user:alice:email", "alice@example.com");
tn.set("user:alice:age", "30");
});5. Handle Large Datasets with Chunking
Split large operations into manageable chunks.
/**
 * Processes an entire "data:" keyspace in chunks of 1000 so no single
 * transaction grows too large or runs too long.
 *
 * Fix over the original: progress markers are written under a separate
 * "processed:" prefix. Writing `<key>:processed` inside the scanned
 * "data:" range made every pass create new keys ahead of the cursor, so
 * the scan kept finding its own markers and never terminated.
 */
async function processLargeDataset() {
let startKey = "data:";
const chunkSize = 1000;
while (true) {
const chunk = await db.getRangeAll(
startKey,
"data:~", // '~' sorts after the "data:..." keys — acts as an upper bound
{ limit: chunkSize }
);
if (chunk.length === 0) break;
await db.doTransaction(async (tn) => {
for (const [key, value] of chunk) {
// Process item; record the marker OUTSIDE the range being scanned.
tn.set("processed:" + key.toString(), "true");
}
});
// A short chunk means the keyspace is exhausted.
if (chunk.length < chunkSize) break;
// Resume strictly after the last key seen: "\x00" is its immediate successor.
startKey = chunk[chunk.length - 1][0].toString() + "\x00";
}
}6. Use Hierarchical Key Structure
Organize keys hierarchically for efficient queries.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Good: Hierarchical structure
await db.set("app:users:alice:profile", "...");
await db.set("app:users:alice:settings", "...");
await db.set("app:users:bob:profile", "...");
await db.set("app:orders:12345:items", "...");
// Query all user data
const aliceData = await db.getRangeAllStartsWith("app:users:alice:");
// Query all orders
const orders = await db.getRangeAllStartsWith("app:orders:");7. Use Tuple Encoding for Composite Keys
Leverage tuple encoding for structured keys.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open().withKeyEncoding(fdb.encoders.tuple);
// Store with composite keys
await db.set(["user", "alice", "profile"], "data");
await db.set(["user", "alice", "settings"], "data");
await db.set(["order", 12345, "items"], "data");
// Query by prefix
const aliceData = await db.getRangeAllStartsWith(["user", "alice"]);
const orders = await db.getRangeAllStartsWith(["order"]);8. Use Directories for Multi-Tenancy
Isolate tenant data with directory layer.
import fdb, { directory } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Create tenant directories
const tenant1 = await directory.createOrOpen(db, ["tenants", "acme"]);
const tenant2 = await directory.createOrOpen(db, ["tenants", "techcorp"]);
// Scoped databases
const acmeDb = db.at(tenant1);
const techcorpDb = db.at(tenant2);
// Isolated data
await acmeDb.set("data", "Acme data");
await techcorpDb.set("data", "Techcorp data");9. Implement Retry Logic with Backoff
Handle transient failures gracefully.
/**
 * Invoke `operation`, retrying on failure with exponential backoff.
 * Makes up to `maxRetries + 1` attempts in total; after a failed attempt
 * k (0-based) it waits `baseDelay * 2^k` milliseconds. The last failure
 * is re-thrown unchanged once attempts are exhausted.
 */
async function withRetry<T>(
operation: () => Promise<T>,
maxRetries = 5,
baseDelay = 100
): Promise<T> {
let attempt = 0;
while (true) {
try {
return await operation();
} catch (error) {
// Out of attempts: surface the final error to the caller.
if (attempt === maxRetries) throw error;
await new Promise(resolve => setTimeout(resolve, baseDelay * 2 ** attempt));
attempt++;
}
}
}
const result = await withRetry(() => db.get("key"));10. Monitor Transaction Metrics
Track performance and errors for optimization.
/**
 * Collects simple counters for database work: how many operations
 * succeeded, how many failed, and the mean duration (ms) of the
 * successful ones.
 */
class MetricsCollector {
private transactionCount = 0;
private errorCount = 0;
private totalDuration = 0;
/**
 * Time `operation`; on success record its duration, on failure count the
 * error and re-throw it unchanged.
 */
async trackTransaction<T>(
operation: () => Promise<T>
): Promise<T> {
const startedAt = Date.now();
try {
const result = await operation();
this.transactionCount++;
this.totalDuration += Date.now() - startedAt;
return result;
} catch (error) {
this.errorCount++;
throw error;
}
}
/** Snapshot of the current metrics (a fresh object on every call). */
getMetrics() {
return {
transactionCount: this.transactionCount,
errorCount: this.errorCount,
avgDuration: this.transactionCount === 0
? 0
: this.totalDuration / this.transactionCount,
};
}
}
const metrics = new MetricsCollector();
await metrics.trackTransaction(() =>
db.doTransaction(async (tn) => {
tn.set("key", "value");
})
);
console.log("Metrics:", metrics.getMetrics());11. Close Connections Properly
Ensure clean shutdown of database connections.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Use try-finally for cleanup
try {
await db.set("key", "value");
} finally {
db.close();
fdb.stopNetworkSync();
}
// Handle graceful shutdown
process.on("SIGTERM", () => {
console.log("Shutting down...");
db.close();
fdb.stopNetworkSync();
process.exit(0);
});
process.on("SIGINT", () => {
console.log("Interrupted, cleaning up...");
db.close();
fdb.stopNetworkSync();
process.exit(0);
});12. Use Connection Pooling
Reuse database connections across application.
/**
 * Process-wide singleton wrapper that shares one database handle across
 * the whole application instead of opening new handles everywhere.
 */
class DatabasePool {
private static db: ReturnType<typeof fdb.open> | null = null;
/** Open the shared handle; later calls are no-ops. */
static initialize() {
if (this.db) return;
fdb.setAPIVersion(620);
const handle = fdb.open();
handle.setNativeOptions({
transaction_timeout: 10000,
max_watches: 20000,
});
this.db = handle;
}
/**
 * Fetch the shared handle.
 * @throws Error when initialize() has not been called yet.
 */
static getDatabase() {
const handle = this.db;
if (!handle) {
throw new Error("Database not initialized");
}
return handle;
}
/** Close the handle and stop the client network thread; safe to call twice. */
static shutdown() {
if (!this.db) return;
this.db.close();
fdb.stopNetworkSync();
this.db = null;
}
}
// Initialize once at startup
DatabasePool.initialize();
// Use throughout application
const db = DatabasePool.getDatabase();
await db.set("key", "value");
// Shutdown on exit
process.on("exit", () => {
DatabasePool.shutdown();
});