Versionstamp operations for generating unique temporal identifiers with transaction commit versions. Versionstamps are 10-byte values containing a transaction version and order information, enabling globally unique, monotonically increasing identifiers.
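For illustration, the two components can be read out of a committed stamp with plain Buffer accessors (a minimal sketch; decodeVersionstamp is a hypothetical helper and stamp is any 10-byte versionstamp Buffer):
// Versionstamps are big-endian, which is why versionstamped keys sort chronologically
function decodeVersionstamp(stamp: Buffer) {
return {
version: stamp.readBigUInt64BE(0), // bytes 0-7: transaction commit version
batch: stamp.readUInt16BE(8), // bytes 8-9: order within the commit batch
};
}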
Understanding versionstamp format and components.
/**
* Unbound versionstamp structure
* Used when setting versionstamped keys/values before commit
*/
interface UnboundStamp {
/** The data buffer containing the versionstamp placeholder */
data: Buffer;
/** Position of the versionstamp in data (10 bytes) */
stampPos: number;
/** Position of the transaction code (2 bytes, optional) */
codePos?: number;
}
/**
* Versionstamp format (10 bytes total):
* - Bytes 0-7: Transaction version (8 bytes)
* - Bytes 8-9: Transaction batch order (2 bytes)
*
* The version is assigned when the transaction commits
* The batch order distinguishes versionstamps within the same transaction
 */
Usage Example:
import fdb, { tuple } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Versionstamp is 10 bytes
// - First 8 bytes: transaction version
// - Last 2 bytes: order within transaction
await db.doTransaction(async (tn) => {
const stamp = tn.getVersionstamp();
return stamp.promise;
}).then((versionstamp) => {
console.log("Versionstamp:", versionstamp);
console.log("Length:", versionstamp.length); // 10
});
Retrieve the versionstamp for the current transaction after commit.
/**
* Get the transaction's versionstamp
* Returns an object with a promise that resolves after commit
* The versionstamp is a 10-byte Buffer uniquely identifying the transaction
* @returns Object with promise that resolves to 10-byte versionstamp Buffer
*/
getVersionstamp(): { promise: Promise<Buffer> };
Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Get versionstamp after commit
const versionstamp = await db.doTransaction(async (tn) => {
tn.set("data", "value");
// Get versionstamp (will resolve after commit)
const stamp = tn.getVersionstamp();
// Cannot await directly inside transaction - would deadlock!
// Return the promise object instead
return stamp.promise;
});
console.log("Transaction versionstamp:", versionstamp);
// Use versionstamp as a key
await db.set(`log:${versionstamp.toString("hex")}`, "log entry");
Set keys with embedded versionstamps for time-ordered data.
/**
* Set a key with an embedded versionstamp
* The versionstamp placeholder in the key will be replaced with the actual versionstamp at commit time
* Requires key encoder that supports unbound versionstamps (like tuple encoding)
* @param key - Key with versionstamp placeholder (e.g., tuple with unboundVersionstamp)
* @param value - Value to set
* @param bakeAfterCommit - Whether to bake the versionstamp back into the key object after commit (default: true)
*/
setVersionstampedKey(
key: KeyIn,
value: ValIn,
bakeAfterCommit?: boolean
): void;
/**
* Set a key with versionstamp suffix
* Creates a key: [prefix][10-byte versionstamp][suffix]
* Simpler alternative when not using tuple encoding
* @param key - Base key prefix
* @param value - Value to set
* @param suffix - Optional suffix after versionstamp
*/
setVersionstampSuffixedKey(
key: KeyIn,
value: ValIn,
suffix?: Buffer
): void;
Usage Example:
import fdb, { tuple } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Using tuple encoding with versionstamp
const tupleDb = db.withKeyEncoding(fdb.encoders.tuple);
await tupleDb.doTransaction(async (tn) => {
// Create time-ordered log entries
const logKey = ["log", tuple.unboundVersionstamp()];
tn.setVersionstampedKey(logKey, "Log message 1");
// Multiple entries in same transaction
const logKey2 = ["log", tuple.unboundVersionstamp()];
tn.setVersionstampedKey(logKey2, "Log message 2");
// Keys will have unique versionstamps with different batch orders
});
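Because bakeAfterCommit defaults to true, the placeholder inside the key tuple is overwritten with the real versionstamp once the transaction commits, so the same array can be reused as a concrete key afterwards (a sketch of that behavior; the key name is illustrative):
const bakedKey = ["log", tuple.unboundVersionstamp()];
await tupleDb.doTransaction(async (tn) => {
tn.setVersionstampedKey(bakedKey, "Log message 3");
});
// After commit the unbound stamp in bakedKey holds the committed value,
// so it can be used for a direct read
const entry = await tupleDb.get(bakedKey);
console.log("Read back:", entry?.toString());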
// Using suffix method without tuple encoding
await db.doTransaction(async (tn) => {
// Key will be: "events:" + 10-byte versionstamp + suffix
tn.setVersionstampSuffixedKey(
"events:",
JSON.stringify({ type: "click" }),
Buffer.from(":metadata")
);
});
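Reading suffixed keys back involves no tuple layer, so the 10-byte stamp is sliced out of the raw key by offset (a minimal sketch, assuming the "events:" prefix used above):
const events = await db.getRangeAllStartsWith("events:");
for (const [key, value] of events) {
const prefixLen = "events:".length;
const stamp = key.subarray(prefixLen, prefixLen + 10); // the 10-byte versionstamp
const suffix = key.subarray(prefixLen + 10); // ":metadata"
console.log(stamp.toString("hex"), suffix.toString(), value.toString());
}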
// Query time-ordered data
const tupleDb2 = db.withKeyEncoding(fdb.encoders.tuple);
const logs = await tupleDb2.getRangeAllStartsWith(["log"]);
// Logs are automatically in chronological order by versionstamp
for (const [key, value] of logs) {
console.log("Key:", key, "Value:", value.toString());
}
Set values with embedded versionstamps.
/**
* Set a value with an embedded versionstamp
* The versionstamp placeholder in the value will be replaced with the actual versionstamp at commit time
* Requires value encoder that supports unbound versionstamps (like tuple encoding)
* @param key - The key to set
* @param value - Value with versionstamp placeholder
* @param bakeAfterCommit - Whether to bake the versionstamp back into the value object after commit (default: true)
*/
setVersionstampedValue(
key: KeyIn,
value: ValIn,
bakeAfterCommit?: boolean
): void;
/**
* Set a value with versionstamp prefix
* Creates a value: [prefix][10-byte versionstamp][value]
* Simpler alternative when not using tuple encoding
* @param key - The key to set
* @param value - Optional base value (appended after versionstamp)
* @param prefix - Optional prefix before versionstamp
*/
setVersionstampPrefixedValue(
key: KeyIn,
value?: ValIn,
prefix?: Buffer
): void;
/**
* Get a value and extract versionstamp prefix
* Helper for reading values created with setVersionstampPrefixedValue
* Assumes versionstamp is at the start of the value
* @param key - The key to get
* @returns Object with stamp and value, or null if key doesn't exist
*/
getVersionstampPrefixedValue(
key: KeyIn
): Promise<{ stamp: Buffer; value?: ValOut } | null>;
Usage Example:
import fdb, { tuple } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Using tuple encoding with versionstamp in value
const tupleValueDb = db.withValueEncoding(fdb.encoders.tuple);
await tupleValueDb.doTransaction(async (tn) => {
// Store data with embedded timestamp
const value = ["user:alice", { action: "login" }, tuple.unboundVersionstamp()];
tn.setVersionstampedValue("events:login:123", value);
// Value will have actual versionstamp after commit
});
// Using prefix method
await db.doTransaction(async (tn) => {
// Value will be: 10-byte versionstamp + JSON data
tn.setVersionstampPrefixedValue(
"item:123:history",
Buffer.from(JSON.stringify({ status: "updated" }))
);
});
// Reading versionstamped values
const result = await db.getVersionstampPrefixedValue("item:123:history");
if (result) {
console.log("Versionstamp:", result.stamp.toString("hex"));
console.log("Value:", result.value?.toString());
}
Use versionstamps for time-ordered data structures.
Usage Example:
import fdb, { tuple } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
const tupleDb = db.withKeyEncoding(fdb.encoders.tuple);
// Time-ordered queue using versionstamps
class VersionstampQueue {
constructor(private db: typeof tupleDb, private prefix: string) {}
async enqueue(item: any): Promise<void> {
await this.db.doTransaction(async (tn) => {
const key = [this.prefix, tuple.unboundVersionstamp()];
tn.setVersionstampedKey(key, JSON.stringify(item));
});
}
async dequeue(batchSize: number = 1): Promise<any[]> {
return await this.db.doTransaction(async (tn) => {
const items = await tn.getRangeAll(
[this.prefix],
undefined,
{ limit: batchSize }
);
const results = [];
for (const [key, value] of items) {
results.push(JSON.parse(value.toString()));
tn.clear(key);
}
return results;
});
}
async peek(count: number = 10): Promise<any[]> {
const items = await this.db.getRangeAll(
[this.prefix],
undefined,
{ limit: count }
);
return items.map(([_, value]) => JSON.parse(value.toString()));
}
}
// Usage
const queue = new VersionstampQueue(tupleDb, "queue");
// Enqueue items (automatically ordered by commit time)
await queue.enqueue({ task: "process_order", orderId: 123 });
await queue.enqueue({ task: "send_email", userId: 456 });
// Dequeue items in order
const items = await queue.dequeue(2);
console.log("Dequeued:", items);Utility functions for working with versionstamps.
/**
* Tuple package versionstamp functions
*/
const tuple: {
/**
* Create an unbound versionstamp marker for use in tuples
* @param code - Optional user code (0-65535) to distinguish versionstamps in same transaction
* @returns UnboundStamp placeholder
*/
unboundVersionstamp(code?: number): UnboundStamp;
/**
* Pack tuple with versionstamp
* @param items - Tuple items
* @returns Buffer with packed tuple
*/
pack(items: TupleItem[]): Buffer;
/**
* Unpack tuple containing versionstamp
* @param buffer - Packed tuple buffer
* @returns Unpacked tuple items
*/
unpack(buffer: Buffer): TupleItem[];
};
type TupleItem = null | Buffer | string | number | boolean | TupleItem[] | UnboundStamp;
Usage Example:
import fdb, { tuple } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Create unbound versionstamp with custom code
const stamp1 = tuple.unboundVersionstamp(100);
const stamp2 = tuple.unboundVersionstamp(200);
await db.doTransaction(async (tn) => {
// Multiple versionstamps in same transaction with different codes
const key1 = tuple.pack(["events", stamp1]);
const key2 = tuple.pack(["events", stamp2]);
tn.setVersionstampedKeyRaw(key1, "event 1");
tn.setVersionstampedKeyRaw(key2, "event 2");
// Both will have same transaction version but different batch orders
});
Manage multiple versionstamps within a single transaction.
/**
* Get next transaction ID for versionstamps
* Used internally to assign unique batch orders within a transaction
* @returns Next available transaction code (0-65535)
*/
getNextTransactionID(): number;
Usage Example:
import fdb, { tuple } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
const tupleDb = db.withKeyEncoding(fdb.encoders.tuple);
await tupleDb.doTransaction(async (tn) => {
// Automatically managed transaction IDs
for (let i = 0; i < 100; i++) {
const key = ["batch", tuple.unboundVersionstamp()];
tn.setVersionstampedKey(key, `item ${i}`);
// Each gets unique batch order automatically
}
});
// Manual ID management (advanced)
await db.doTransaction(async (tn) => {
const id1 = tn.getNextTransactionID();
const id2 = tn.getNextTransactionID();
console.log("IDs:", id1, id2); // 0, 1
});
Implement distributed logging with automatic ordering.
Usage Example:
import fdb, { tuple, keySelector } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
const tupleDb = db.withKeyEncoding(fdb.encoders.tuple).withValueEncoding(
fdb.encoders.json
);
// Distributed log with automatic ordering
class VersionstampLog {
constructor(private db: typeof tupleDb, private logName: string) {}
async append(entry: any): Promise<void> {
await this.db.doTransaction(async (tn) => {
const key = [this.logName, tuple.unboundVersionstamp()];
tn.setVersionstampedKey(
key,
{
...entry,
timestamp: Date.now(),
},
false // Don't bake - we don't need the key back
);
});
}
async getRecent(limit: number = 100): Promise<any[]> {
const entries = await this.db.getRangeAll(
[this.logName],
undefined,
{
limit,
reverse: true, // Most recent first
}
);
return entries.map(([_, value]) => value);
}
async getSince(versionstamp: Buffer, limit?: number): Promise<any[]> {
const entries = await this.db.getRangeAll(
keySelector.firstGreaterThan([this.logName, versionstamp]),
[this.logName, Buffer.from([0xff])],
{ limit }
);
return entries.map(([_, value]) => value);
}
async trim(keepCount: number): Promise<number> {
return await this.db.doTransaction(async (tn) => {
// Get all entries
const entries = await tn.getRangeAll([this.logName]);
if (entries.length <= keepCount) return 0;
// Delete old entries
const toDelete = entries.length - keepCount;
for (let i = 0; i < toDelete; i++) {
tn.clear(entries[i][0]);
}
return toDelete;
});
}
}
// Usage
const log = new VersionstampLog(tupleDb, "app_log");
// Multiple processes can append concurrently
await Promise.all([
log.append({ level: "info", message: "Server started" }),
log.append({ level: "debug", message: "Cache warmed" }),
log.append({ level: "error", message: "Connection failed" }),
]);
// Retrieve recent entries (automatically in order)
const recent = await log.getRecent(10);
for (const entry of recent) {
console.log(`[${entry.level}] ${entry.message}`);
}
// Trim old entries
const deleted = await log.trim(1000);
console.log(`Deleted ${deleted} old log entries`);
Implement event sourcing with versionstamp ordering.
Usage Example:
import fdb, { tuple, keySelector } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
const tupleDb = db.withKeyEncoding(fdb.encoders.tuple).withValueEncoding(
fdb.encoders.json
);
// Event store with versionstamp ordering
class EventStore {
constructor(private db: typeof tupleDb) {}
async appendEvent(
streamId: string,
eventType: string,
data: any
): Promise<Buffer> {
return await this.db.doTransaction(async (tn) => {
const key = ["events", streamId, tuple.unboundVersionstamp()];
tn.setVersionstampedKey(key, {
type: eventType,
data,
timestamp: Date.now(),
});
// Return versionstamp for this event
const stamp = tn.getVersionstamp();
return stamp.promise;
});
}
async getEvents(streamId: string, afterVersion?: Buffer): Promise<any[]> {
const start = afterVersion
? keySelector.firstGreaterThan(["events", streamId, afterVersion])
: ["events", streamId];
const events = await this.db.getRangeAll(start, [
"events",
streamId,
Buffer.from([0xff]),
]);
return events.map(([key, value]) => ({
version: key[2], // The versionstamp
...value,
}));
}
async getEventsSince(
globalVersion: Buffer,
limit?: number
): Promise<any[]> {
const events = await this.db.getRangeAll(
keySelector.firstGreaterThan(["events", "", globalVersion]),
["events", "\xFF"],
{ limit }
);
return events.map(([key, value]) => ({
streamId: key[1],
version: key[2],
...value,
}));
}
}
// Usage
const store = new EventStore(tupleDb);
// Append events
const v1 = await store.appendEvent("order:123", "OrderCreated", {
items: ["item1"],
});
const v2 = await store.appendEvent("order:123", "ItemAdded", {
item: "item2",
});
const v3 = await store.appendEvent("order:123", "OrderSubmitted", {});
console.log("Event versions:", v1, v2, v3);
// Replay events for a stream
const events = await store.getEvents("order:123");
console.log("Order events:", events);
// Get all new events since a version
const newEvents = await store.getEventsSince(v1);
console.log("Events after v1:", newEvents);Low-level versionstamp operations for advanced use cases.
/**
* Set versionstamped key with raw Buffer
* @param keyBytes - Key buffer with versionstamp placeholder
* @param value - Value to set
*/
setVersionstampedKeyRaw(keyBytes: Buffer, value: ValIn): void;
/**
* Set versionstamped key with prefix and suffix
* @param prefix - Prefix before versionstamp
* @param suffix - Suffix after versionstamp
* @param value - Value to set
*/
setVersionstampedKeyBuf(
prefix: Buffer | undefined,
suffix: Buffer | undefined,
value: ValIn
): void;
/**
* Set versionstamped value with raw Buffer
* @param key - The key to set
* @param value - Value buffer with versionstamp placeholder
*/
setVersionstampedValueRaw(key: KeyIn, value: Buffer): void;
Usage Example:
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
await db.doTransaction(async (tn) => {
// Raw versionstamped key
const prefix = Buffer.from("log:");
const suffix = Buffer.from(":metadata");
tn.setVersionstampedKeyBuf(prefix, suffix, "log entry");
// Key will be: "log:" + 10-byte versionstamp + ":metadata"
});
Test FoundationDB operations with Jest's async/await support and lifecycle hooks.
import fdb from "foundationdb";
import { describe, beforeAll, afterAll, beforeEach, test, expect } from "@jest/globals";
describe("FoundationDB Tests", () => {
let db: ReturnType<typeof fdb.open>;
beforeAll(() => {
fdb.setAPIVersion(620);
db = fdb.open();
});
afterAll(() => {
db.close();
fdb.stopNetworkSync();
});
beforeEach(async () => {
// Clear test data
await db.clearRangeStartsWith("test:");
});
test("should perform basic operations", async () => {
await db.set("test:key", "value");
const result = await db.get("test:key");
expect(result?.toString()).toBe("value");
});
test("should handle transactions", async () => {
const count = await db.doTransaction(async (tn) => {
tn.set("test:counter", "5");
const val = await tn.get("test:counter");
return parseInt(val?.toString() || "0");
});
expect(count).toBe(5);
});
test("should handle errors gracefully", async () => {
await expect(async () => {
await db.doTransaction(async (tn) => {
throw new Error("Test error");
});
}).rejects.toThrow("Test error");
});
});
Use Mocha with Promise-based tests and proper cleanup.
import fdb from "foundationdb";
import { describe, before, after, beforeEach, it } from "mocha";
import { expect } from "chai";
describe("FoundationDB Operations", function() {
this.timeout(10000); // 10 second timeout
let db: ReturnType<typeof fdb.open>;
before(() => {
fdb.setAPIVersion(620);
db = fdb.open();
});
after(() => {
db.close();
fdb.stopNetworkSync();
});
beforeEach(async () => {
await db.clearRangeStartsWith("test:");
});
it("should read and write data", async () => {
await db.set("test:mocha", "data");
const value = await db.get("test:mocha");
expect(value?.toString()).to.equal("data");
});
it("should handle concurrent operations", async () => {
const operations = Array.from({ length: 10 }, (_, i) =>
db.set(`test:item:${i}`, `value${i}`)
);
await Promise.all(operations);
const items = await db.getRangeAllStartsWith("test:item:");
expect(items).to.have.lengthOf(10);
});
it("should support atomic operations", async () => {
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await Promise.all([
db.add("test:atomic", delta),
db.add("test:atomic", delta),
db.add("test:atomic", delta)
]);
const result = await db.get("test:atomic");
expect(result?.readBigInt64LE(0)).to.equal(3n);
});
});
Leverage Vitest's fast execution and modern testing features.
import fdb from "foundationdb";
import { describe, beforeAll, afterAll, beforeEach, test, expect } from "vitest";
describe("FoundationDB with Vitest", () => {
let db: ReturnType<typeof fdb.open>;
beforeAll(() => {
fdb.setAPIVersion(620);
db = fdb.open();
});
afterAll(() => {
db.close();
fdb.stopNetworkSync();
});
beforeEach(async () => {
await db.clearRangeStartsWith("test:");
});
test("concurrent writes with snapshot isolation", async () => {
const writes = Array.from({ length: 100 }, (_, i) =>
db.set(`test:concurrent:${i}`, `value${i}`)
);
await Promise.all(writes);
const results = await db.getRangeAllStartsWith("test:concurrent:");
expect(results).toHaveLength(100);
});
test("transaction retry logic", async () => {
let attempts = 0;
const result = await db.doTransaction(async (tn) => {
attempts++;
const value = await tn.get("test:retry");
tn.set("test:retry", (parseInt(value?.toString() || "0") + 1).toString());
return attempts;
});
expect(result).toBeGreaterThanOrEqual(1);
});
test("range queries with limits", async () => {
for (let i = 0; i < 50; i++) {
await db.set(`test:range:${i.toString().padStart(3, "0")}`, `value${i}`);
}
const page1 = await db.getRangeAll("test:range:", "test:range:~", { limit: 10 });
const page2 = await db.getRangeAll("test:range:", "test:range:~", {
limit: 10,
reverse: true
});
expect(page1).toHaveLength(10);
expect(page2).toHaveLength(10);
expect(page1[0][0].toString()).not.toBe(page2[0][0].toString());
});
});
Helper functions for common test scenarios.
import fdb, { FDBError } from "foundationdb";
export class FDBTestHelper {
private db: ReturnType<typeof fdb.open>;
private testPrefix: string;
constructor(testPrefix = "test:") {
fdb.setAPIVersion(620);
this.db = fdb.open();
this.testPrefix = testPrefix;
}
getDB() {
return this.db;
}
async cleanup() {
await this.db.clearRangeStartsWith(this.testPrefix);
}
async close() {
this.db.close();
}
async withTransaction<T>(
fn: (tn: any) => Promise<T>,
opts?: any
): Promise<T> {
return await this.db.doTransaction(fn, opts);
}
async expectError(
fn: () => Promise<any>,
errorCode?: number
): Promise<FDBError> {
try {
await fn();
throw new Error("Expected operation to throw");
} catch (error) {
if (error instanceof FDBError) {
if (errorCode !== undefined && error.code !== errorCode) {
throw new Error(`Expected error code ${errorCode}, got ${error.code}`);
}
return error;
}
throw error;
}
}
async populateTestData(count: number, prefix?: string) {
const pfx = prefix || this.testPrefix;
const operations = Array.from({ length: count }, (_, i) =>
this.db.set(`${pfx}${i}`, `value${i}`)
);
await Promise.all(operations);
}
async assertKeyExists(key: string): Promise<Buffer> {
const value = await this.db.get(key);
if (value === undefined) {
throw new Error(`Expected key "${key}" to exist`);
}
return value;
}
async assertKeyNotExists(key: string): Promise<void> {
const value = await this.db.get(key);
if (value !== undefined) {
throw new Error(`Expected key "${key}" to not exist`);
}
}
}
// Usage in tests
import { describe, beforeAll, afterAll, beforeEach, test, expect } from "vitest";
describe("Using FDBTestHelper", () => {
let helper: FDBTestHelper;
beforeAll(() => {
helper = new FDBTestHelper("test:helper:");
});
afterAll(async () => {
await helper.cleanup();
await helper.close();
});
beforeEach(async () => {
await helper.cleanup();
});
test("populate and verify test data", async () => {
await helper.populateTestData(10);
await helper.assertKeyExists("test:helper:0");
await helper.assertKeyExists("test:helper:9");
await helper.assertKeyNotExists("test:helper:10");
});
test("transaction helper", async () => {
const result = await helper.withTransaction(async (tn) => {
tn.set("test:helper:tx", "value");
return "success";
});
expect(result).toBe("success");
});
});
Manage database connections efficiently across your application.
import fdb from "foundationdb";
class FDBConnectionPool {
private static instance: FDBConnectionPool;
private db: ReturnType<typeof fdb.open> | null = null;
private initialized = false;
private constructor() {}
static getInstance(): FDBConnectionPool {
if (!FDBConnectionPool.instance) {
FDBConnectionPool.instance = new FDBConnectionPool();
}
return FDBConnectionPool.instance;
}
async initialize(opts?: { clusterFile?: string; trace?: string }) {
if (this.initialized) return;
try {
fdb.setAPIVersion(620);
if (opts?.trace) {
fdb.configNetwork({
trace_enable: opts.trace,
trace_format: "json",
});
}
this.db = fdb.open(opts?.clusterFile);
this.db.setNativeOptions({
transaction_timeout: 10000,
transaction_retry_limit: 100,
max_watches: 20000,
});
this.initialized = true;
} catch (error) {
console.error("Failed to initialize FDB connection:", error);
throw error;
}
}
getDatabase(): ReturnType<typeof fdb.open> {
if (!this.db) {
throw new Error("Database not initialized. Call initialize() first.");
}
return this.db;
}
async shutdown() {
if (this.db) {
this.db.close();
fdb.stopNetworkSync();
this.db = null;
this.initialized = false;
}
}
isInitialized(): boolean {
return this.initialized;
}
}
// Usage across application
export const fdbPool = FDBConnectionPool.getInstance();
// In application startup
await fdbPool.initialize({ trace: "./fdb-traces" });
// In any module
import { fdbPool } from "./fdb-pool";
async function getData(key: string) {
const db = fdbPool.getDatabase();
return await db.get(key);
}
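A liveness probe can be built on the same pool; a small sketch (the probe key is arbitrary and need not exist):
async function healthCheck(): Promise<boolean> {
try {
// Any read proves the cluster is reachable; a missing key still succeeds
await fdbPool.getDatabase().get("health:probe");
return true;
} catch (error) {
console.error("FDB health check failed:", error);
return false;
}
}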
// Graceful shutdown
process.on("SIGTERM", async () => {
await fdbPool.shutdown();
process.exit(0);
});
Implement robust retry logic for transient failures.
import fdb, { FDBError } from "foundationdb";
async function withRetry<T>(
operation: () => Promise<T>,
maxRetries = 5,
baseDelay = 100
): Promise<T> {
let lastError: Error | undefined;
for (let attempt = 0; attempt <= maxRetries; attempt++) {
try {
return await operation();
} catch (error) {
lastError = error as Error;
// Don't retry errors where a retry cannot succeed
if (error instanceof FDBError && error.code === 2101) {
throw error; // transaction_too_large - the same payload will fail again
}
if (attempt < maxRetries) {
const delay = baseDelay * Math.pow(2, attempt);
const jitter = Math.random() * delay * 0.1;
await new Promise((resolve) => setTimeout(resolve, delay + jitter));
}
}
}
throw new Error(`Operation failed after ${maxRetries} retries: ${lastError?.message}`);
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const result = await withRetry(async () => {
return await db.doTransaction(async (tn) => {
const value = await tn.get("critical:data");
tn.set("critical:data", "updated");
return value;
});
});
Protect against cascading failures with a circuit breaker.
import fdb, { FDBError } from "foundationdb";
class CircuitBreaker {
private failures = 0;
private lastFailureTime = 0;
private state: "closed" | "open" | "half-open" = "closed";
constructor(
private threshold = 5,
private resetTimeout = 30000
) {}
async execute<T>(operation: () => Promise<T>): Promise<T> {
if (this.state === "open") {
if (Date.now() - this.lastFailureTime > this.resetTimeout) {
this.state = "half-open";
} else {
throw new Error("Circuit breaker is OPEN");
}
}
try {
const result = await operation();
this.onSuccess();
return result;
} catch (error) {
this.onFailure();
throw error;
}
}
private onSuccess() {
this.failures = 0;
this.state = "closed";
}
private onFailure() {
this.failures++;
this.lastFailureTime = Date.now();
if (this.failures >= this.threshold) {
this.state = "open";
}
}
getState() {
return this.state;
}
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const breaker = new CircuitBreaker(5, 30000);
async function safeQuery(key: string): Promise<Buffer | undefined> {
return await breaker.execute(async () => {
return await db.get(key);
});
});
Process large datasets efficiently with automatic chunking.
import fdb from "foundationdb";
async function processBatchInChunks<T>(
db: ReturnType<typeof fdb.open>,
prefix: string,
processor: (batch: Array<[Buffer, Buffer]>) => Promise<T[]>,
chunkSize = 100
): Promise<T[]> {
const results: T[] = [];
let startKey = prefix;
while (true) {
const chunk = await db.getRangeAll(
startKey,
prefix + "~",
{ limit: chunkSize }
);
if (chunk.length === 0) break;
const chunkResults = await processor(chunk);
results.push(...chunkResults);
if (chunk.length < chunkSize) break;
// Continue from after last key
startKey = chunk[chunk.length - 1][0].toString() + "\x00";
}
return results;
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const processed = await processBatchInChunks(
db,
"users:",
async (batch) => {
return batch.map(([key, value]) => ({
key: key.toString(),
parsed: JSON.parse(value.toString()),
}));
},
50
);
Implement application-level caching with FDB watches.
import fdb from "foundationdb";
class FDBCache<T> {
private cache = new Map<string, { value: T; watch: any }>();
constructor(private db: ReturnType<typeof fdb.open>) {}
async get(key: string, parser: (buf: Buffer) => T): Promise<T | undefined> {
// Check cache first
const cached = this.cache.get(key);
if (cached) {
return cached.value;
}
// Fetch and cache with watch
const watch = await this.db.getAndWatch(key);
if (watch.value === undefined) {
return undefined;
}
const value = parser(watch.value);
this.cache.set(key, { value, watch });
// Invalidate on change
watch.promise.then(() => {
this.cache.delete(key);
});
return value;
}
async set(key: string, value: T, serializer: (val: T) => Buffer) {
await this.db.set(key, serializer(value));
this.cache.delete(key); // Invalidate cache
}
clear() {
this.cache.forEach(({ watch }) => watch.cancel());
this.cache.clear();
}
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const cache = new FDBCache(db);
const user = await cache.get(
"user:123",
(buf) => JSON.parse(buf.toString())
);
await cache.set(
"user:123",
{ name: "Alice", email: "alice@example.com" },
(val) => Buffer.from(JSON.stringify(val))
);
Implement distributed locking for coordination.
import fdb from "foundationdb";
class DistributedLock {
private lockKey: string;
private lockValue: string;
constructor(
private db: ReturnType<typeof fdb.open>,
lockName: string,
private ttl = 30000
) {
this.lockKey = `locks:${lockName}`;
this.lockValue = `${Date.now()}-${Math.random()}`;
}
async acquire(timeout = 10000): Promise<boolean> {
const startTime = Date.now();
while (Date.now() - startTime < timeout) {
try {
const acquired = await this.db.doTransaction(async (tn) => {
const existing = await tn.get(this.lockKey);
if (existing === undefined) {
// Lock is free
tn.set(this.lockKey, this.lockValue);
return true;
}
// Check if lock expired
const lockData = existing.toString();
const lockTime = parseInt(lockData.split("-")[0]);
if (Date.now() - lockTime > this.ttl) {
// Lock expired, take it
tn.set(this.lockKey, this.lockValue);
return true;
}
return false;
});
if (acquired) return true;
} catch (error) {
// Transaction conflict, retry
}
// Wait before retry
await new Promise((resolve) => setTimeout(resolve, 100));
}
return false;
}
async release(): Promise<void> {
await this.db.doTransaction(async (tn) => {
const existing = await tn.get(this.lockKey);
if (existing?.toString() === this.lockValue) {
tn.clear(this.lockKey);
}
});
}
async withLock<T>(fn: () => Promise<T>, timeout = 10000): Promise<T> {
const acquired = await this.acquire(timeout);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${this.lockKey}`);
}
try {
return await fn();
} finally {
await this.release();
}
}
}
// Usage
fdb.setAPIVersion(620);
const db = fdb.open();
const lock = new DistributedLock(db, "resource:123", 30000);
await lock.withLock(async () => {
// Critical section - only one process can execute this at a time
const value = await db.get("shared:resource");
await db.set("shared:resource", "updated");
});
Implement event sourcing with ordered event storage.
import fdb, { tuple } from "foundationdb";
interface Event {
type: string;
data: any;
timestamp: number;
version?: Buffer;
}
class EventStore {
private db: ReturnType<typeof fdb.open>;
constructor() {
fdb.setAPIVersion(620);
this.db = fdb.open()
.withKeyEncoding(fdb.encoders.tuple)
.withValueEncoding(fdb.encoders.json);
}
async appendEvent(
streamId: string,
eventType: string,
data: any
): Promise<Buffer> {
return await this.db.doTransaction(async (tn) => {
const key = ["events", streamId, tuple.unboundVersionstamp()];
const event: Event = {
type: eventType,
data,
timestamp: Date.now(),
};
tn.setVersionstampedKey(key, event);
const versionstamp = tn.getVersionstamp();
return versionstamp.promise;
});
}
async getEvents(
streamId: string,
fromVersion?: Buffer,
limit?: number
): Promise<Event[]> {
const start = fromVersion
? ["events", streamId, fromVersion]
: ["events", streamId];
const events = await this.db.getRangeAll(
start,
["events", streamId, Buffer.from([0xff])],
{ limit }
);
return events.map(([key, value]) => ({
...(value as any),
version: key[2] as Buffer,
}));
}
async replay(
streamId: string,
handler: (event: Event) => void | Promise<void>
): Promise<void> {
const events = await this.getEvents(streamId);
for (const event of events) {
await handler(event);
}
}
async getSnapshot(streamId: string): Promise<any> {
const events = await this.getEvents(streamId);
// Rebuild state from events
let state: any = {};
for (const event of events) {
state = this.applyEvent(state, event);
}
return state;
}
private applyEvent(state: any, event: Event): any {
// Apply event to state based on event type
switch (event.type) {
case "created":
return { ...event.data, created: true };
case "updated":
return { ...state, ...event.data };
case "deleted":
return { ...state, deleted: true };
default:
return state;
}
}
}
// Usage
const store = new EventStore();
// Append events
await store.appendEvent("order:123", "OrderCreated", {
items: ["item1", "item2"],
total: 100,
});
await store.appendEvent("order:123", "ItemAdded", {
item: "item3",
});
await store.appendEvent("order:123", "OrderConfirmed", {
confirmedAt: Date.now(),
});
// Replay events
await store.replay("order:123", (event) => {
console.log(`Event: ${event.type}`, event.data);
});
// Get current snapshot
const currentState = await store.getSnapshot("order:123");
console.log("Current state:", currentState);Understanding and handling common FoundationDB error codes.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function handleFDBErrors() {
try {
await db.doTransaction(async (tn) => {
tn.set("key", "value");
});
} catch (error) {
if (error instanceof FDBError) {
switch (error.code) {
case 1007: // Transaction too old
console.error("Transaction ran past the 5 second read window - split the work into smaller transactions");
break;
case 1009: // Request for future version
console.error("Clock skew detected - check system time");
break;
case 1020: // Not committed (conflict with another transaction)
console.error("Transaction conflict - doTransaction retries these automatically");
break;
case 1021: // Commit result unknown (transaction may have succeeded)
console.error("Commit status unknown - check if data was written");
break;
case 1025: // Transaction cancelled
console.error("Transaction was cancelled");
break;
case 1031: // Transaction timed out
console.error("Operation exceeded timeout limit");
break;
case 2101: // Transaction too large
console.error("Transaction size exceeds limit - split into smaller transactions");
break;
default:
console.error(`FDB Error ${error.code}: ${error.message}`);
}
} else {
console.error("Non-FDB error:", error);
}
}
}
Handle transaction conflicts with proper retry logic.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function handleConflicts() {
let attempts = 0;
const maxAttempts = 5;
while (attempts < maxAttempts) {
try {
const result = await db.doTransaction(async (tn) => {
attempts++;
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
// Simulate some processing
await new Promise(resolve => setTimeout(resolve, 10));
tn.set("counter", (count + 1).toString());
return count + 1;
});
console.log(`Success after ${attempts} attempts:`, result);
return result;
} catch (error) {
if (error instanceof FDBError && error.code === 1020) {
console.log(`Attempt ${attempts} failed with a conflict, retrying...`);
if (attempts >= maxAttempts) {
throw new Error(`Failed after ${maxAttempts} attempts`);
}
// doTransaction handles retry automatically, but showing manual retry for illustration
} else {
throw error;
}
}
}
}
Handle and prevent timeout errors effectively.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function handleTimeouts() {
try {
await db.doTransaction(async (tn) => {
// Long-running operation
for await (const batch of tn.getRangeBatch("data:", "data:~")) {
// Process batch
await processBatch(batch);
}
}, {
timeout: 30000, // 30 second timeout
});
} catch (error) {
if (error instanceof FDBError && error.code === 1031) {
console.error("Transaction timed out");
// Strategies to fix:
// 1. Increase timeout
// 2. Split into smaller transactions
// 3. Use snapshot reads where possible
// 4. Optimize query performance
}
throw error;
}
}
async function processBatch(batch: any) {
// Batch processing logic
}
Handle network-related errors and connectivity issues.
import fdb, { FDBError } from "foundationdb";
async function handleNetworkErrors() {
fdb.setAPIVersion(620);
try {
const db = fdb.open("/path/to/fdb.cluster");
await db.doTransaction(async (tn) => {
tn.set("key", "value");
});
} catch (error) {
if (error instanceof FDBError) {
// An unreachable cluster usually surfaces as a timeout, because the client
// keeps retrying connections in the background rather than failing fast
if (error.code === 1031) {
console.error("Transaction timed out - check network connectivity and the cluster file");
} else {
console.error(`FDB Error ${error.code}: ${error.message} - check FDB cluster health`);
}
}
throw error;
}
}
Handle directory-specific errors.
import fdb, { directory, DirectoryError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function handleDirectoryErrors() {
try {
// Try to create a directory that may already exist
return await directory.create(db, ["myapp", "users"]);
} catch (error) {
if (error instanceof DirectoryError) {
console.error("Directory operation failed:", error.message);
// Check specific error messages
if (error.message.includes("already exists")) {
console.log("Directory exists, opening instead");
return await directory.open(db, ["myapp", "users"]);
} else if (error.message.includes("does not exist")) {
console.log("Directory missing, creating");
return await directory.create(db, ["myapp", "users"]);
} else if (error.message.includes("layer mismatch")) {
console.error("Directory layer type mismatch");
}
}
throw error;
}
}
Implement graceful error recovery patterns.
import fdb, { FDBError } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function safeOperation<T>(
operation: () => Promise<T>,
fallback?: T
): Promise<T | undefined> {
try {
return await operation();
} catch (error) {
if (error instanceof FDBError) {
console.error(`FDB Error ${error.code}:`, error.message);
// Return fallback for specific errors
if (error.code === 1031 || error.code === 1007) {
console.log("Using fallback value due to transient error");
return fallback;
}
}
// Re-throw non-recoverable errors
throw error;
}
}
// Usage
const value = await safeOperation(
async () => await db.get("config:setting"),
Buffer.from("default-value")
);
// With retry wrapper
async function withGracefulRetry<T>(
operation: () => Promise<T>,
maxRetries = 3
): Promise<T> {
let lastError: Error;
for (let i = 0; i < maxRetries; i++) {
try {
return await operation();
} catch (error) {
lastError = error as Error;
if (error instanceof FDBError) {
// Don't retry errors that a retry cannot fix (transaction/key/value too large)
if ([2101, 2102, 2103].includes(error.code)) {
throw error;
}
}
// Exponential backoff
await new Promise(resolve =>
setTimeout(resolve, Math.pow(2, i) * 100)
);
}
}
throw new Error(`Failed after ${maxRetries} retries: ${lastError!.message}`);
}
Diagnose and fix common performance problems.
Issue: Slow Transactions
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Problem: Large transaction with many operations
async function slowTransaction() {
await db.doTransaction(async (tn) => {
// Thousands of operations in single transaction
for (let i = 0; i < 10000; i++) {
tn.set(`key:${i}`, `value:${i}`);
}
});
}
// Solution: Split into smaller transactions
async function fastTransactions() {
const batchSize = 100;
for (let i = 0; i < 10000; i += batchSize) {
await db.doTransaction(async (tn) => {
for (let j = i; j < i + batchSize && j < 10000; j++) {
tn.set(`key:${j}`, `value:${j}`);
}
});
}
}
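If the batches are independent, they can also run with bounded parallelism instead of strictly one at a time; a sketch (the concurrency limit of 4 is arbitrary):
async function fastTransactionsParallel() {
const batchSize = 100;
const concurrency = 4;
const starts: number[] = [];
for (let i = 0; i < 10000; i += batchSize) starts.push(i);
// Run up to `concurrency` batch transactions at a time
for (let i = 0; i < starts.length; i += concurrency) {
await Promise.all(
starts.slice(i, i + concurrency).map((start) =>
db.doTransaction(async (tn) => {
for (let j = start; j < start + batchSize && j < 10000; j++) {
tn.set(`key:${j}`, `value:${j}`);
}
})
)
);
}
}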
// Solution: Use appropriate transaction options
await db.doTransaction(async (tn) => {
// Process data
}, {
timeout: 30000,
size_limit: 10000000, // FDB's hard limit is 10MB
});
Issue: High Conflict Rate
// Problem: Many transactions competing for same keys
async function highConflict() {
await Promise.all(
Array.from({ length: 100 }, () =>
db.doTransaction(async (tn) => {
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
tn.set("counter", (count + 1).toString());
})
)
);
}
// Solution: Use atomic operations
async function lowConflict() {
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await Promise.all(
Array.from({ length: 100 }, () =>
db.add("counter", delta)
)
);
}
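Atomic counters are stored as 64-bit little-endian integers, so reading one back means decoding the Buffer (a small sketch):
const raw = await db.get("counter");
const count = raw ? raw.readBigInt64LE(0) : 0n;
console.log("Counter value:", count);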
// Solution: Shard hot keys
class ShardedCounter {
constructor(private db: ReturnType<typeof fdb.open>, private shards = 10) {}
async increment() {
const shard = Math.floor(Math.random() * this.shards);
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await this.db.add(`counter:${shard}`, delta);
}
async getTotal(): Promise<number> {
let total = 0;
for (let i = 0; i < this.shards; i++) {
const value = await this.db.get(`counter:${i}`);
if (value) {
total += Number(value.readBigInt64LE(0));
}
}
return total;
}
}
Issue: Memory Usage
// Problem: Loading too much data at once
async function highMemory() {
const allData = await db.getRangeAll("data:", "data:~");
// Process huge dataset - may cause OOM
}
// Solution: Use streaming with batches
async function lowMemory() {
for await (const batch of db.getRangeBatch("data:", "data:~")) {
// Process one batch at a time
await processBatch(batch);
}
}
// Solution: Use appropriate streaming mode
import { StreamingMode } from "foundationdb";
async function optimizedStreaming() {
for await (const batch of db.getRangeBatch("data:", "data:~", {
streamingMode: StreamingMode.Small, // Smaller batches
})) {
await processBatch(batch);
}
}
async function processBatch(batch: any) {
// Process batch
}
Diagnose and resolve connection issues.
Issue: Cannot Connect to Cluster
import fdb from "foundationdb";
// Check 1: Verify cluster file
console.log("Checking cluster file...");
try {
const clusterFile = "/etc/foundationdb/fdb.cluster";
const fs = require("fs");
const content = fs.readFileSync(clusterFile, "utf8");
console.log("Cluster file content:", content);
} catch (error) {
console.error("Cannot read cluster file:", error);
}
// Check 2: Test connection
fdb.setAPIVersion(620);
try {
const db = fdb.open();
await db.get("test");
console.log("Connection successful");
} catch (error) {
console.error("Connection failed:", error);
}
// Check 3: Verify network configuration
fdb.configNetwork({
trace_enable: "./fdb-traces",
trace_format: "json",
});
// Check traces for connection errors
Issue: Transaction Timeouts
// Diagnostic: Check transaction size
await db.doTransaction(async (tn) => {
// Perform operations
const size = await tn.getApproximateSize(); // returns a Promise<number>
console.log("Transaction size:", size);
if (size > 5000000) {
console.warn("Transaction size large, may timeout");
}
});
// Solution: Increase timeout or split transaction
db.setNativeOptions({
transaction_timeout: 30000, // 30 seconds
});
// Or per-transaction
await db.doTransaction(async (tn) => {
// Operations
}, {
timeout: 60000, // 60 seconds
});
Issue: Watch Not Triggering
// Problem: Watch created outside transaction
// const watch = db.watch("key"); // ERROR
// Solution: Create watch in transaction
const watch = await db.doTransaction(async (tn) => {
return tn.watch("key");
});
// Problem: Awaiting watch inside transaction
await db.doTransaction(async (tn) => {
const watch = tn.watch("key");
// await watch.promise; // DEADLOCK!
return watch;
}).then(async (watch) => {
await watch.promise; // Correct
});
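A watch fires only once, so continuous observation means re-registering after every fire; a minimal sketch of that loop built on getAndWatch (the onChange callback is illustrative):
async function observeKey(
key: string,
onChange: (value: Buffer | undefined) => void
) {
while (true) {
const watch = await db.getAndWatch(key);
onChange(watch.value);
await watch.promise; // resolves when the key next changes
}
}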
// Problem: Too many watches
db.setNativeOptions({
max_watches: 20000, // Increase limit
});
Prevent and diagnose data integrity problems.
Issue: Encoding Mismatch
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Problem: Writing with one encoder, reading with another
await db.withValueEncoding(fdb.encoders.json)
.set("key", { value: 123 });
const wrong = await db.withValueEncoding(fdb.encoders.string)
.get("key"); // Wrong encoding!
// Solution: Consistent encoder usage
const jsonDb = db.withValueEncoding(fdb.encoders.json);
await jsonDb.set("key", { value: 123 });
const correct = await jsonDb.get("key"); // { value: 123 }
// Solution: Document encoding choices
class UserStore {
private db: ReturnType<typeof fdb.open>;
constructor(db: ReturnType<typeof fdb.open>) {
this.db = db
.at("users:")
.withKeyEncoding(fdb.encoders.string)
.withValueEncoding(fdb.encoders.json);
}
async save(id: string, user: any) {
await this.db.set(id, user);
}
async load(id: string) {
return await this.db.get(id);
}
}
Issue: Lost Updates
// Problem: Not using transactions properly
async function lostUpdate() {
const value = await db.get("counter");
const count = parseInt(value?.toString() || "0");
// Another process might update here!
await db.set("counter", (count + 1).toString());
}
// Solution: Use transactions
async function safeUpdate() {
await db.doTransaction(async (tn) => {
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
tn.set("counter", (count + 1).toString());
});
}
// Better: Use atomic operations
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await db.add("counter", delta);Issue: Directory Conflicts
import fdb, { directory } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Problem: Racing directory creation
async function racingCreation() {
try {
const dir = await directory.create(db, ["myapp", "tenant"]);
} catch (error) {
// Second process fails
}
}
// Solution: Use createOrOpen
async function safeCreation() {
const dir = await directory.createOrOpen(db, ["myapp", "tenant"]);
// Works for all processes
}
// Solution: Handle errors gracefully
async function robustCreation() {
try {
const dir = await directory.create(db, ["myapp", "tenant"]);
} catch (error) {
if (error.message.includes("already exists")) {
const dir = await directory.open(db, ["myapp", "tenant"]);
return dir;
}
throw error;
}
}
Tools and techniques for debugging issues.
Enable Tracing
import fdb from "foundationdb";
// Enable detailed tracing
fdb.configNetwork({
trace_enable: "./fdb-traces",
trace_format: "json",
trace_log_group: "myapp",
});
fdb.setAPIVersion(620);
const db = fdb.open();
// Check traces at ./fdb-traces/*.json
// Look for: errors, warnings, slow_task events
Transaction Debugging
import fdb, { TransactionOptionCode } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
await db.doTransaction(async (tn) => {
// Enable transaction logging
tn.setOption(TransactionOptionCode.DebugTransactionIdentifier, "my-tx-123");
tn.setOption(TransactionOptionCode.LogTransaction);
// Perform operations
tn.set("key", "value");
}, {
debug_transaction_identifier: "test-transaction",
log_transaction: true,
});
// Check traces for transaction details
Performance Profiling
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
async function profileOperation() {
const start = Date.now();
await db.doTransaction(async (tn) => {
const opStart = Date.now();
const value = await tn.get("key");
console.log(`Get took ${Date.now() - opStart}ms`);
const setStart = Date.now();
tn.set("key", "value");
console.log(`Set took ${Date.now() - setStart}ms`);
});
console.log(`Total transaction: ${Date.now() - start}ms`);
}
// Monitor transaction sizes
await db.doTransaction(async (tn) => {
for (let i = 0; i < 1000; i++) {
tn.set(`key:${i}`, `value:${i}`);
if (i % 100 === 0) {
console.log(`Size at ${i}:`, await tn.getApproximateSize());
}
}
});
1. Keep Transactions Short
Minimize transaction duration to reduce conflicts and avoid timeouts.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Bad: Long-running computation in transaction
await db.doTransaction(async (tn) => {
const data = await tn.get("data");
// Expensive computation
const result = await expensiveComputation(data);
tn.set("result", result);
});
// Good: Compute outside transaction
const data = await db.get("data");
const result = await expensiveComputation(data);
await db.doTransaction(async (tn) => {
tn.set("result", result);
});
2. Use Atomic Operations for Counters
Avoid read-modify-write patterns for counters.
// Bad: Read-modify-write
await db.doTransaction(async (tn) => {
const value = await tn.get("counter");
const count = parseInt(value?.toString() || "0");
tn.set("counter", (count + 1).toString());
});
// Good: Atomic add
const delta = Buffer.allocUnsafe(8);
delta.writeBigInt64LE(1n, 0);
await db.add("counter", delta);3. Use Snapshot Reads When Possible
Reduce conflicts by using snapshot reads for non-critical data.
await db.doTransaction(async (tn) => {
// Critical read (causes conflicts)
const critical = await tn.get("critical:data");
// Non-critical read (no conflicts)
const metadata = await tn.snapshot().get("metadata");
// Write based on critical data
tn.set("result", processData(critical));
});
4. Batch Related Operations
Group related operations in single transactions.
// Bad: Multiple transactions
await db.set("user:alice:name", "Alice");
await db.set("user:alice:email", "alice@example.com");
await db.set("user:alice:age", "30");
// Good: Single transaction
await db.doTransaction(async (tn) => {
tn.set("user:alice:name", "Alice");
tn.set("user:alice:email", "alice@example.com");
tn.set("user:alice:age", "30");
});
5. Handle Large Datasets with Chunking
Split large operations into manageable chunks.
async function processLargeDataset() {
let startKey = "data:";
const chunkSize = 1000;
while (true) {
const chunk = await db.getRangeAll(
startKey,
"data:~",
{ limit: chunkSize }
);
if (chunk.length === 0) break;
await db.doTransaction(async (tn) => {
for (const [key, value] of chunk) {
// Process item
tn.set(key.toString() + ":processed", "true");
}
});
if (chunk.length < chunkSize) break;
startKey = chunk[chunk.length - 1][0].toString() + "\x00";
}
}
6. Use Hierarchical Key Structure
Organize keys hierarchically for efficient queries.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Good: Hierarchical structure
await db.set("app:users:alice:profile", "...");
await db.set("app:users:alice:settings", "...");
await db.set("app:users:bob:profile", "...");
await db.set("app:orders:12345:items", "...");
// Query all user data
const aliceData = await db.getRangeAllStartsWith("app:users:alice:");
// Query all orders
const orders = await db.getRangeAllStartsWith("app:orders:");7. Use Tuple Encoding for Composite Keys
Leverage tuple encoding for structured keys.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open().withKeyEncoding(fdb.encoders.tuple);
// Store with composite keys
await db.set(["user", "alice", "profile"], "data");
await db.set(["user", "alice", "settings"], "data");
await db.set(["order", 12345, "items"], "data");
// Query by prefix
const aliceData = await db.getRangeAllStartsWith(["user", "alice"]);
const orders = await db.getRangeAllStartsWith(["order"]);8. Use Directories for Multi-Tenancy
Isolate tenant data with directory layer.
import fdb, { directory } from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Create tenant directories
const tenant1 = await directory.createOrOpen(db, ["tenants", "acme"]);
const tenant2 = await directory.createOrOpen(db, ["tenants", "techcorp"]);
// Scoped databases
const acmeDb = db.at(tenant1);
const techcorpDb = db.at(tenant2);
// Isolated data
await acmeDb.set("data", "Acme data");
await techcorpDb.set("data", "Techcorp data");9. Implement Retry Logic with Backoff
Handle transient failures gracefully.
async function withRetry<T>(
operation: () => Promise<T>,
maxRetries = 5,
baseDelay = 100
): Promise<T> {
for (let attempt = 0; attempt <= maxRetries; attempt++) {
try {
return await operation();
} catch (error) {
if (attempt === maxRetries) throw error;
const delay = baseDelay * Math.pow(2, attempt);
await new Promise(resolve => setTimeout(resolve, delay));
}
}
throw new Error("Should not reach here");
}
const result = await withRetry(() => db.get("key"));10. Monitor Transaction Metrics
Track performance and errors for optimization.
class MetricsCollector {
private metrics = {
transactionCount: 0,
errorCount: 0,
avgDuration: 0,
};
async trackTransaction<T>(
operation: () => Promise<T>
): Promise<T> {
const start = Date.now();
try {
const result = await operation();
this.metrics.transactionCount++;
const duration = Date.now() - start;
this.metrics.avgDuration =
(this.metrics.avgDuration * (this.metrics.transactionCount - 1) + duration) /
this.metrics.transactionCount;
return result;
} catch (error) {
this.metrics.errorCount++;
throw error;
}
}
getMetrics() {
return { ...this.metrics };
}
}
const metrics = new MetricsCollector();
await metrics.trackTransaction(() =>
db.doTransaction(async (tn) => {
tn.set("key", "value");
})
);
console.log("Metrics:", metrics.getMetrics());11. Close Connections Properly
Ensure clean shutdown of database connections.
import fdb from "foundationdb";
fdb.setAPIVersion(620);
const db = fdb.open();
// Use try-finally for cleanup
try {
await db.set("key", "value");
} finally {
db.close();
fdb.stopNetworkSync();
}
// Handle graceful shutdown
process.on("SIGTERM", () => {
console.log("Shutting down...");
db.close();
fdb.stopNetworkSync();
process.exit(0);
});
process.on("SIGINT", () => {
console.log("Interrupted, cleaning up...");
db.close();
fdb.stopNetworkSync();
process.exit(0);
});
12. Use Connection Pooling
Reuse database connections across application.
class DatabasePool {
private static db: ReturnType<typeof fdb.open> | null = null;
static initialize() {
if (!this.db) {
fdb.setAPIVersion(620);
this.db = fdb.open();
this.db.setNativeOptions({
transaction_timeout: 10000,
max_watches: 20000,
});
}
}
static getDatabase() {
if (!this.db) {
throw new Error("Database not initialized");
}
return this.db;
}
static shutdown() {
if (this.db) {
this.db.close();
fdb.stopNetworkSync();
this.db = null;
}
}
}
// Initialize once at startup
DatabasePool.initialize();
// Use throughout application
const db = DatabasePool.getDatabase();
await db.set("key", "value");
// Shutdown on exit
process.on("exit", () => {
DatabasePool.shutdown();
});