tessl install tessl/npm-cache-manager@7.2.0

Cache Manager for Node.js with support for multi-store caching, background refresh, and Keyv-compatible storage adapters.
The wrap() method provides function memoization with intelligent cache management, including background refresh of expiring cache entries. This enables high-performance caching while preventing stale data.
Wraps a function in cache. The first time the function is called, its result is stored in cache. Subsequent calls retrieve from cache instead of executing the function, until the cached value expires.
/**
* Wrap a function in cache with individual TTL and refresh threshold
* @param key - Cache key for storing the result
* @param fnc - Function to execute and cache (can return a value or Promise)
* @param ttl - Optional TTL in milliseconds or function to compute TTL from result
* @param refreshThreshold - Optional refresh threshold in milliseconds or function to compute from result
* @returns Cached or computed value
* @throws Errors from the wrapped function are thrown immediately (not cached)
*/
wrap<T>(
key: string,
fnc: () => T | Promise<T>,
ttl?: number | ((value: T) => number),
refreshThreshold?: number | ((value: T) => number)
): Promise<T>;

Behavioral Details:
The generic type T is preserved through caching: wrap<T> resolves to the same type the wrapped function returns.
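For example, the result type is inferred from the wrapped function (a minimal sketch, assuming a cache created with createCache() as in the examples below):

interface Profile { id: number; name: string }

// Inferred as Promise<Profile>; no annotation needed on the result
const profile = await cache.wrap('profile:1', async (): Promise<Profile> => ({ id: 1, name: 'Ada' }), 60000);
console.log(profile.name); // typed as string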
Parameters:
- key - Cache key. Must be unique for this cached result. The same key used with different functions will collide.
- fnc - Function to wrap. Should be idempotent. Can be sync or async (returning Promise<T>).
- ttl - Time-to-live in milliseconds, or a function (value: T) => number that computes the TTL from the result. If omitted, the cache default is used.
- refreshThreshold - When the remaining TTL falls below this value (in milliseconds), a background refresh is triggered. Can also be a function (value: T) => number. Must be less than the TTL.

Usage Examples:
import { createCache } from 'cache-manager';
const cache = createCache();
// Expensive database query function
async function fetchUser(id: number) {
console.log('Fetching from database...');
// Simulate database call
await new Promise(resolve => setTimeout(resolve, 1000));
return { id, name: `User ${id}`, email: `user${id}@example.com` };
}
// First call - executes function and caches result
const user1 = await cache.wrap('user:123', () => fetchUser(123), 60000);
// Output: "Fetching from database..."
console.log(user1); // { id: 123, name: 'User 123', email: 'user123@example.com' }
// Second call - returns from cache without executing function
const user2 = await cache.wrap('user:123', () => fetchUser(123), 60000);
// No output - function not called
console.log(user2); // { id: 123, name: 'User 123', email: 'user123@example.com' }
// Different key - executes function
const user3 = await cache.wrap('user:456', () => fetchUser(456), 60000);
// Output: "Fetching from database..."Edge Cases:
import { createCache } from 'cache-manager';
const cache = createCache();
// Empty string key (valid)
await cache.wrap('', () => 'value', 60000);
// Function returning undefined
const undef = await cache.wrap('undef', () => undefined, 60000);
console.log(undef); // undefined (cached)
// Function returning null
const nullVal = await cache.wrap('null', () => null, 60000);
console.log(nullVal); // null (cached)
// Function returning 0 or false (cached, not confused with miss)
const zero = await cache.wrap('zero', () => 0, 60000);
console.log(zero); // 0
const falseVal = await cache.wrap('false', () => false, 60000);
console.log(falseVal); // false
// Synchronous function
const syncResult = await cache.wrap('sync', () => 42, 60000);
console.log(syncResult); // 42
// Async function
const asyncResult = await cache.wrap('async', async () => {
await new Promise(resolve => setTimeout(resolve, 100));
return 'done';
}, 60000);
console.log(asyncResult); // 'done'
// No TTL (uses cache default or no expiration)
const noTtl = await cache.wrap('no-ttl', () => 'value');
// Zero TTL (immediately expires, function called every time)
const alwaysFresh = await cache.wrap('always-fresh', () => Date.now(), 0);
// Very large TTL
const longLived = await cache.wrap('long', () => 'value', Infinity);

Wraps a function in cache using an options object for configuration. Provides the same functionality as the basic signature but with named parameters.
/**
* Wrap a function in cache with options object
* @param key - Cache key for storing the result
* @param fnc - Function to execute and cache
* @param options - Configuration options
* @returns Cached or computed value
* @throws Errors from the wrapped function are thrown immediately (not cached)
*/
wrap<T>(
key: string,
fnc: () => T | Promise<T>,
options: WrapOptions<T>
): Promise<T>;
type WrapOptions<T> = {
ttl?: number | ((value: T) => number);
refreshThreshold?: number | ((value: T) => number);
};

Usage Examples:
import { createCache } from 'cache-manager';
const cache = createCache();
async function fetchConfig() {
return { theme: 'dark', language: 'en' };
}
// Using options object for clearer code
const config = await cache.wrap('app:config', fetchConfig, {
ttl: 300000, // 5 minutes
refreshThreshold: 60000 // 1 minute
});
console.log(config); // { theme: 'dark', language: 'en' }
// Options with only TTL
const result1 = await cache.wrap('key1', () => 'value', {
ttl: 60000
});
// Options with only refreshThreshold (uses cache default TTL)
const result2 = await cache.wrap('key2', () => 'value', {
refreshThreshold: 10000
});
// Empty options object (uses all defaults)
const result3 = await cache.wrap('key3', () => 'value', {});

Wraps a function in cache and returns raw data including the expiration timestamp. Useful when you need to know when the cached value will expire.
/**
* Wrap a function in cache returning raw data with expiration
* @param key - Cache key for storing the result
* @param fnc - Function to execute and cache
* @param options - Configuration options with raw flag
* @returns Raw cached data with value and expiration timestamp
* @throws Errors from the wrapped function are thrown immediately (not cached)
*/
wrap<T>(
key: string,
fnc: () => T | Promise<T>,
options: WrapOptionsRaw<T>
): Promise<StoredDataRaw<T>>;
type WrapOptionsRaw<T> = WrapOptions<T> & {
raw: true;
};
type StoredDataRaw<T> = {
value: T;
expires: number; // Timestamp in milliseconds (Unix epoch)
};

Usage Examples:
import { createCache } from 'cache-manager';
const cache = createCache();
async function fetchMetrics() {
return { requests: 1500, errors: 3, uptime: 99.9 };
}
// Get raw data including expiration timestamp
const rawData = await cache.wrap('metrics', fetchMetrics, {
ttl: 30000,
raw: true
});
console.log(rawData.value); // { requests: 1500, errors: 3, uptime: 99.9 }
console.log(rawData.expires); // Timestamp like 1706453821234
// Calculate time until expiration
const now = Date.now();
const secondsUntilExpiry = Math.floor((rawData.expires - now) / 1000);
console.log(`Expires in ${secondsUntilExpiry} seconds`);
// Check if data is stale
const isStale = Date.now() > rawData.expires;
console.log(`Is stale: ${isStale}`);
// Display freshness percentage
const ttl = 30000;
const age = Date.now() - (rawData.expires - ttl);
const freshness = Math.max(0, 100 - (age / ttl) * 100);
console.log(`Freshness: ${freshness.toFixed(1)}%`);

Advanced Raw Data Usage:
import { createCache } from 'cache-manager';
const cache = createCache();
// Conditional refresh based on expiration
async function getWithFreshness(key: string, fetchFn: () => Promise<any>) {
const cached = await cache.wrap(key, fetchFn, {
ttl: 60000,
raw: true
});
const remaining = cached.expires - Date.now();
const age = 60000 - remaining;
return {
value: cached.value,
expiresIn: remaining,
age: age,
freshness: (remaining / 60000) * 100,
};
}
// Stale-while-revalidate pattern
async function staleWhileRevalidate(
key: string,
fetchFn: () => Promise<any>,
ttl: number
) {
try {
const cached = await cache.wrap(key, fetchFn, {
ttl,
raw: true
});
const remaining = cached.expires - Date.now();
// If stale, trigger background refresh
if (remaining < 0) {
// Return stale data immediately
const staleValue = cached.value;
// Refresh in background
fetchFn().then(fresh => {
cache.set(key, fresh, ttl);
}).catch(err => {
console.error('Background refresh failed:', err);
});
return staleValue;
}
return cached.value;
} catch (error) {
// On error, try to return stale data
const stale = await cache.get(key);
if (stale !== undefined) {
console.warn('Returning stale data due to error:', error);
return stale;
}
throw error;
}
}

The refresh threshold enables background updates of cached values before they expire. When the remaining TTL falls below the threshold, the function is executed in the background while the old value is returned immediately.
import { createCache } from 'cache-manager';
const cache = createCache();
async function fetchData() {
console.log('Fetching data...');
await new Promise(resolve => setTimeout(resolve, 100));
return { timestamp: Date.now() };
}
// Cache with 10 second TTL and 3 second refresh threshold
await cache.wrap('data', fetchData, 10000, 3000);
// Output: "Fetching data..."
// Wait 8 seconds (TTL remaining: ~2 seconds, below threshold)
await new Promise(resolve => setTimeout(resolve, 8000));
// This call returns cached value immediately but triggers background refresh
const result = await cache.wrap('data', fetchData, 10000, 3000);
console.log('Got result immediately:', result);
// Output: "Got result immediately: { timestamp: [old timestamp] }"
// Background: "Fetching data..."
// Wait a moment for background refresh to complete
await new Promise(resolve => setTimeout(resolve, 200));
// Next call gets the refreshed value
const refreshed = await cache.wrap('data', fetchData, 10000, 3000);
console.log('Refreshed result:', refreshed);
// Output: "Refreshed result: { timestamp: [new timestamp] }"Refresh Behavior Details:
import { createCache } from 'cache-manager';
const cache = createCache();
let callCount = 0;
async function trackedFetch() {
callCount++;
console.log(`Fetch call ${callCount}`);
await new Promise(resolve => setTimeout(resolve, 50));
return { count: callCount, timestamp: Date.now() };
}
// Initial call - cache miss
const r1 = await cache.wrap('data', trackedFetch, 5000, 1000);
console.log('Call 1:', r1); // { count: 1, ... }
// Immediate second call - cache hit
const r2 = await cache.wrap('data', trackedFetch, 5000, 1000);
console.log('Call 2:', r2); // { count: 1, ... } (same cached value)
// Wait until remaining TTL drops below the threshold (elapsed > 4 seconds)
await new Promise(resolve => setTimeout(resolve, 4500));
// This triggers background refresh
const r3 = await cache.wrap('data', trackedFetch, 5000, 1000);
console.log('Call 3:', r3); // { count: 1, ... } (old value returned immediately)
// Background: "Fetch call 2"
// Wait for refresh to complete
await new Promise(resolve => setTimeout(resolve, 100));
// New cached value available
const r4 = await cache.wrap('data', trackedFetch, 5000, 1000);
console.log('Call 4:', r4); // { count: 2, ... } (refreshed value)

TTL and refresh threshold can be computed based on the cached value using functions.
import { createCache } from 'cache-manager';
const cache = createCache();
interface ApiResponse {
data: any;
cacheControl: 'short' | 'medium' | 'long';
priority?: 'high' | 'low';
}
async function fetchApiData(): Promise<ApiResponse> {
return {
data: { items: [1, 2, 3] },
cacheControl: 'medium',
priority: 'high',
};
}
// TTL function determines cache duration based on response
const getTtl = (response: ApiResponse) => {
switch (response.cacheControl) {
case 'short': return 10000; // 10 seconds
case 'medium': return 60000; // 1 minute
case 'long': return 300000; // 5 minutes
default: return 30000; // 30 seconds default
}
};
// Refresh threshold: refresh when 20% of TTL remains
const getRefreshThreshold = (response: ApiResponse) => {
return getTtl(response) * 0.2;
};
const result = await cache.wrap(
'api:data',
fetchApiData,
getTtl,
getRefreshThreshold
);
// TTL will be 60000ms (1 minute) based on 'medium' cacheControl
// Refresh threshold will be 12000ms (12 seconds)
// Advanced: Priority-based TTL
const getPriorityTtl = (response: ApiResponse) => {
const baseTtl = getTtl(response);
// High priority data cached longer
if (response.priority === 'high') {
return baseTtl * 2;
}
return baseTtl;
};
const result2 = await cache.wrap('api:priority', fetchApiData, getPriorityTtl);
// TTL will be 120000ms (2 minutes) due to high priority

Complex Dynamic TTL Patterns:
import { createCache } from 'cache-manager';
const cache = createCache();
interface DataWithMetadata {
value: any;
volatility: 'stable' | 'moderate' | 'volatile';
size: number;
importance: number; // 0-10
}
async function fetchDataWithMetadata(): Promise<DataWithMetadata> {
return {
value: { items: [] },
volatility: 'moderate',
size: 1024,
importance: 7,
};
}
// Complex TTL computation
const computeTtl = (data: DataWithMetadata): number => {
// Base TTL on volatility
let ttl: number;
switch (data.volatility) {
case 'stable': ttl = 600000; break; // 10 minutes
case 'moderate': ttl = 180000; break; // 3 minutes
case 'volatile': ttl = 30000; break; // 30 seconds
default: ttl = 60000; // 1 minute
}
// Adjust for importance (higher importance = longer cache)
ttl *= (1 + data.importance / 10);
// Adjust for size (larger data = longer cache to avoid refetch overhead)
if (data.size > 1024 * 1024) { // > 1MB
ttl *= 1.5;
}
return Math.floor(ttl);
};
const computeRefresh = (data: DataWithMetadata): number => {
const ttl = computeTtl(data);
// High importance: refresh earlier (30% remaining)
if (data.importance >= 8) {
return ttl * 0.3;
}
// Normal: refresh at 20% remaining
return ttl * 0.2;
};
const data = await cache.wrap(
'complex:data',
fetchDataWithMetadata,
computeTtl,
computeRefresh
);

Error Handling:
import { createCache } from 'cache-manager';
const cache = createCache();
let attempts = 0;
async function unreliableFunction() {
attempts++;
if (attempts <= 2) {
throw new Error('Service temporarily unavailable');
}
return { status: 'success', attempts };
}
// Errors are NOT cached - function is called again on next wrap
try {
const result1 = await cache.wrap('api:call', unreliableFunction, 60000);
console.log(result1);
} catch (error) {
console.error('Attempt 1 failed:', error.message);
}
try {
const result2 = await cache.wrap('api:call', unreliableFunction, 60000);
console.log(result2);
} catch (error) {
console.error('Attempt 2 failed:', error.message);
}
// Third attempt succeeds and is cached
const result3 = await cache.wrap('api:call', unreliableFunction, 60000);
console.log('Success:', result3); // { status: 'success', attempts: 3 }
// Fourth call uses cached value
const result4 = await cache.wrap('api:call', unreliableFunction, 60000);
console.log('Cached:', result4); // { status: 'success', attempts: 3 } (function not called)
// Listen to refresh errors via events
cache.on('refresh', ({ key, value, error }) => {
if (error) {
console.error(`Background refresh failed for ${key}:`, error);
// Old value remains in cache until expiration
}
});

Advanced Error Handling:
import { createCache } from 'cache-manager';
const cache = createCache();
// Fallback value on error
async function wrapWithFallback<T>(
key: string,
fetchFn: () => Promise<T>,
fallback: T,
ttl: number
): Promise<T> {
try {
return await cache.wrap(key, fetchFn, ttl);
} catch (error) {
console.error(`Error fetching ${key}, using fallback:`, error);
return fallback;
}
}
const config = await wrapWithFallback(
'config',
async () => {
throw new Error('Config service down');
},
{ theme: 'light', lang: 'en' }, // Fallback
60000
);
console.log(config); // { theme: 'light', lang: 'en' }
// Retry with exponential backoff
async function wrapWithRetry<T>(
key: string,
fetchFn: () => Promise<T>,
ttl: number,
maxRetries = 3
): Promise<T> {
const retryFn = async (): Promise<T> => {
for (let i = 0; i < maxRetries; i++) {
try {
return await fetchFn();
} catch (error) {
if (i === maxRetries - 1) throw error;
const delay = Math.pow(2, i) * 1000; // 1s, 2s, 4s
console.log(`Retry ${i + 1}/${maxRetries} after ${delay}ms`);
await new Promise(resolve => setTimeout(resolve, delay));
}
}
throw new Error('Max retries exceeded');
};
return await cache.wrap(key, retryFn, ttl);
}
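A possible invocation of wrapWithRetry (a sketch; the '/api/stats' endpoint is illustrative):

const stats = await wrapWithRetry('stats', async () => {
  const res = await fetch('/api/stats');
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  return res.json();
}, 60000);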
// Stale-on-error pattern
async function wrapStaleOnError<T>(
key: string,
fetchFn: () => Promise<T>,
ttl: number
): Promise<T | undefined> {
try {
return await cache.wrap(key, fetchFn, ttl);
} catch (error) {
console.warn(`Fetch failed for ${key}, checking for stale data:`, error);
// Try to return stale cached value
const stale = await cache.get<T>(key);
if (stale !== undefined) {
console.log(`Returning stale data for ${key}`);
return stale;
}
throw error; // No stale data available
}
}
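A possible invocation of wrapStaleOnError (a sketch; fetchUser is assumed from the earlier examples):

const user = await wrapStaleOnError('user:123', () => fetchUser(123), 60000);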
// Circuit breaker pattern
class CircuitBreaker {
private failures = 0;
private readonly threshold = 5;
private readonly timeout = 60000;
private openUntil = 0;
async execute<T>(fn: () => Promise<T>): Promise<T> {
const now = Date.now();
// Check if circuit is open
if (this.openUntil > now) {
throw new Error('Circuit breaker is open');
}
try {
const result = await fn();
this.failures = 0; // Reset on success
return result;
} catch (error) {
this.failures++;
if (this.failures >= this.threshold) {
this.openUntil = now + this.timeout;
console.error('Circuit breaker opened');
}
throw error;
}
}
}
const breaker = new CircuitBreaker();
async function robustWrap<T>(
key: string,
fetchFn: () => Promise<T>,
ttl: number
): Promise<T> {
return await cache.wrap(
key,
() => breaker.execute(fetchFn),
ttl
);
}
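A possible invocation of robustWrap (a sketch; the endpoint is illustrative):

const report = await robustWrap('report:daily', async () => {
  const res = await fetch('/api/report/daily');
  if (!res.ok) throw new Error(`HTTP ${res.status}`);
  return res.json();
}, 300000);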
Timeline Example:
Time 0s:   wrap('key', fn, 10000, 3000) → fn() executes, caches for 10s (expires at 10s)
Time 2s:   wrap('key', fn, 10000, 3000) → returns cached (8s remaining > 3s threshold)
Time 8s:   wrap('key', fn, 10000, 3000) → returns cached (2s remaining < 3s threshold)
           → triggers background refresh
Time 8.1s: background fn() completes → updates cache (new expiry at ~18.1s)
Time 9s:   wrap('key', fn, 10000, 3000) → returns refreshed cached value
Time 10s:  (original entry would have expired without the refresh)
Time 17s:  wrap('key', fn, 10000, 3000) → returns cached (still fresh from the 8.1s refresh)

By default, refresh only updates stores up to and including the one where the key was found. This optimizes write performance in tiered caching scenarios.
import { createCache } from 'cache-manager';
import { Keyv } from 'keyv';
import KeyvRedis from '@keyv/redis';
import { CacheableMemory } from 'cacheable';
// Two-tier cache: memory (L1) + Redis (L2)
const cache = createCache({
stores: [
new Keyv({ store: new CacheableMemory({ ttl: 30000 }) }),
new Keyv({ store: new KeyvRedis('redis://localhost:6379') }),
],
refreshAllStores: false, // Default: only refresh up to found store
});
// Scenario 1: Key found in memory (L1)
// - Refresh updates: memory only
// - Redis not updated (saves write operation)
// Scenario 2: Key found in Redis (L2)
// - Refresh updates: memory + Redis
// - Both stores kept in sync
// Scenario 3: Key not found (cache miss)
// - Function executes synchronously
// - Result written to all stores
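A runnable sketch of the default behavior (assuming two plain in-memory Keyv stores so each tier can be inspected directly; exact timing is illustrative):

import { createCache } from 'cache-manager';
import { Keyv } from 'keyv';

const l1 = new Keyv();
const l2 = new Keyv();
const tiered = createCache({ stores: [l1, l2] });

// Populate both stores, then wait past the refresh threshold
await tiered.wrap('k', async () => `v@${Date.now()}`, 1000, 500);
await new Promise(resolve => setTimeout(resolve, 600));

// Key is found in L1, so the background refresh updates L1 only
await tiered.wrap('k', async () => `v@${Date.now()}`, 1000, 500);
await new Promise(resolve => setTimeout(resolve, 50));

console.log(await l1.get('k')); // refreshed value
console.log(await l2.get('k')); // original value (not rewritten)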
To refresh all stores regardless of where the key was found:
const cache = createCache({
stores: [memoryStore, redisStore],
refreshAllStores: true, // Refresh all stores when threshold reached
});
// Now refresh always updates both memory and Redis
// Ensures consistency but increases write load

Monitor background refresh operations using the refresh event:
cache.on('refresh', ({ key, value, error }) => {
if (error) {
console.error(`Refresh failed for ${key}:`, error);
// Old value remains cached
} else {
console.log(`Refreshed ${key} with value:`, value);
// New value now cached
}
});

Refresh Monitoring Example:
import { createCache } from 'cache-manager';
const cache = createCache();
const refreshStats = {
total: 0,
success: 0,
failed: 0,
avgDuration: 0,
durations: [] as number[],
};
const refreshTiming = new Map<string, number>();
// Track refresh start. The cache does not emit a "refresh started" event, so
// call startRefresh(key) yourself just before a wrap() call that is likely to
// trigger a background refresh.
function startRefresh(key: string) {
refreshTiming.set(key, Date.now());
}
cache.on('refresh', ({ key, error }) => {
refreshStats.total++;
if (error) {
refreshStats.failed++;
console.error(`Refresh failed for ${key}:`, error);
} else {
refreshStats.success++;
// Track duration
const start = refreshTiming.get(key);
if (start) {
const duration = Date.now() - start;
refreshStats.durations.push(duration);
// Keep last 100
if (refreshStats.durations.length > 100) {
refreshStats.durations.shift();
}
// Update average
const sum = refreshStats.durations.reduce((a, b) => a + b, 0);
refreshStats.avgDuration = sum / refreshStats.durations.length;
refreshTiming.delete(key);
}
}
});
// Report refresh stats
setInterval(() => {
const successRate = refreshStats.total > 0
? (refreshStats.success / refreshStats.total) * 100
: 0;
console.log(`Refresh stats: ${refreshStats.total} total, ${successRate.toFixed(1)}% success, ${refreshStats.avgDuration.toFixed(0)}ms avg`);
}, 60000);
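Example wiring for the duration tracking above (a sketch; the inline fetch function is illustrative):

// Record a start time just before a wrap() call that may trigger a refresh
startRefresh('metrics');
await cache.wrap('metrics', async () => ({ requests: 1500 }), 30000, 10000);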
The wrap() function includes built-in cache coalescing to prevent "thundering herd" problems. If multiple concurrent calls attempt to wrap the same key, only one function execution occurs, and all callers receive the same result.
import { createCache } from 'cache-manager';
const cache = createCache();
let executionCount = 0;
async function expensiveOperation() {
executionCount++;
console.log(`Execution ${executionCount}`);
await new Promise(resolve => setTimeout(resolve, 1000));
return { data: 'result', execution: executionCount };
}
// Simulate 10 concurrent requests for the same key
const promises = Array.from({ length: 10 }, () =>
cache.wrap('expensive:key', expensiveOperation, 60000)
);
const results = await Promise.all(promises);
console.log(`Function executed ${executionCount} time(s)`);
// Output: "Function executed 1 time(s)"
// All 10 calls receive the same result from single execution
console.log(results.every(r => r.execution === 1)); // true

Coalescing Behavior:
import { createCache } from 'cache-manager';
const cache = createCache();
let call = 0;
async function trackedFetch() {
const thisCall = ++call;
console.log(`Starting call ${thisCall}`);
await new Promise(resolve => setTimeout(resolve, 500));
console.log(`Finished call ${thisCall}`);
return { call: thisCall };
}
// Launch 3 concurrent wraps; 2 more join below while the first is still in flight
const p1 = cache.wrap('key', trackedFetch, 60000);
const p2 = cache.wrap('key', trackedFetch, 60000);
const p3 = cache.wrap('key', trackedFetch, 60000);
// Wait a bit, then launch more
await new Promise(resolve => setTimeout(resolve, 100));
const p4 = cache.wrap('key', trackedFetch, 60000);
const p5 = cache.wrap('key', trackedFetch, 60000);
const [r1, r2, r3, r4, r5] = await Promise.all([p1, p2, p3, p4, p5]);
console.log('Results:', r1, r2, r3, r4, r5);
// All results are { call: 1 }
// Only "Starting call 1" and "Finished call 1" loggedMulti-Cache Instance Isolation:
import { createCache } from 'cache-manager';
// Different cache instances have separate coalescing
const cache1 = createCache({ cacheId: 'cache1' });
const cache2 = createCache({ cacheId: 'cache2' });
let execCount = 0;
async function sharedFetch() {
execCount++;
console.log(`Execution ${execCount}`);
await new Promise(resolve => setTimeout(resolve, 100));
return { count: execCount };
}
// These will coalesce within each cache instance
const [r1, r2] = await Promise.all([
cache1.wrap('key', sharedFetch, 60000),
cache1.wrap('key', sharedFetch, 60000),
]);
console.log('Cache1:', r1, r2); // Both { count: 1 }
// But separate cache instances don't coalesce with each other
const [r3, r4] = await Promise.all([
cache2.wrap('key', sharedFetch, 60000),
cache2.wrap('key', sharedFetch, 60000),
]);
console.log('Cache2:', r3, r4); // Both { count: 2 }
console.log('Total executions:', execCount); // 2

In multi-store configurations, wrap() reads through the stores in order and writes results back across the tiers; refresh propagation is controlled by refreshAllStores (see above):
import { createCache } from 'cache-manager';
import { Keyv } from 'keyv';
import { CacheableMemory } from 'cacheable';
import KeyvRedis from '@keyv/redis';
const cache = createCache({
stores: [
new Keyv({ store: new CacheableMemory({ lruSize: 100 }) }), // L1: Small, fast
new Keyv({ store: new KeyvRedis('redis://localhost:6379') }), // L2: Large, persistent
],
});
let fetchCount = 0;
async function fetchData() {
fetchCount++;
console.log(`Fetch ${fetchCount}`);
return { data: 'value', fetchCount };
}
// First call: cache miss in both stores
const r1 = await cache.wrap('key', fetchData, 300000);
// Executes function, stores in both L1 and L2
console.log(r1); // { data: 'value', fetchCount: 1 }
// Second call: cache hit in L1
const r2 = await cache.wrap('key', fetchData, 300000);
// Returns from L1 immediately
console.log(r2); // { data: 'value', fetchCount: 1 }
// Clear L1 (simulate L1 eviction)
await cache.stores[0].clear();
// Third call: cache miss in L1, hit in L2
const r3 = await cache.wrap('key', fetchData, 300000);
// Returns from L2, promotes to L1
console.log(r3); // { data: 'value', fetchCount: 1 }
// Fourth call: cache hit in L1 again (promoted)
const r4 = await cache.wrap('key', fetchData, 300000);
console.log(r4); // { data: 'value', fetchCount: 1 }
console.log('Total fetches:', fetchCount); // 1

API Response Caching:
import { createCache } from 'cache-manager';
const cache = createCache();
async function getCachedApiResponse<T>(
endpoint: string,
ttl = 60000
): Promise<T> {
return await cache.wrap(
`api:${endpoint}`,
async () => {
const response = await fetch(endpoint);
if (!response.ok) {
throw new Error(`API error: ${response.status}`);
}
return await response.json() as T;
},
ttl
);
}
// Usage
const users = await getCachedApiResponse<User[]>('/api/users', 300000);

Database Query Caching:
import { createCache } from 'cache-manager';
const cache = createCache();
async function getCachedQuery<T>(
query: string,
params: any[],
ttl = 60000
): Promise<T> {
const key = `query:${query}:${JSON.stringify(params)}`;
return await cache.wrap(
key,
async () => {
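// db is assumed to be a database client available in scope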
return await db.query(query, params) as T;
},
ttl
);
}
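A possible invocation of getCachedQuery (a sketch; the query and the User type are illustrative):

const activeUsers = await getCachedQuery<User[]>(
  'SELECT * FROM users WHERE active = $1',
  [true],
  120000
);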
Function Memoization:
import { createCache } from 'cache-manager';
const cache = createCache();
function memoize<T extends (...args: any[]) => any>(
fn: T,
keyFn: (...args: Parameters<T>) => string,
ttl = 60000
): (...args: Parameters<T>) => Promise<Awaited<ReturnType<T>>> {
return (...args: Parameters<T>) => {
const key = keyFn(...args);
// Callers always receive a Promise, so the return type reflects that
return cache.wrap(key, () => fn(...args), ttl);
};
}
// Usage
const expensiveComputation = memoize(
(a: number, b: number) => {
console.log('Computing...');
return a * b;
},
(a, b) => `compute:${a}:${b}`,
5000
);
console.log(await expensiveComputation(5, 10)); // "Computing..." → 50
console.log(await expensiveComputation(5, 10)); // 50 (cached)

Cache Warming:
import { createCache } from 'cache-manager';
const cache = createCache();
async function warmCache() {
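// db is assumed to be an application data source available in scope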
const criticalKeys = [
{ key: 'config:app', fn: () => db.getAppConfig() },
{ key: 'config:features', fn: () => db.getFeatures() },
{ key: 'config:limits', fn: () => db.getLimits() },
];
await Promise.all(
criticalKeys.map(({ key, fn }) =>
cache.wrap(key, fn, 300000, 60000)
)
);
console.log('Cache warmed');
}
// Warm on startup (catch errors so a failed warm-up doesn't crash the process)
warmCache().catch(console.error);
// Re-warm periodically
setInterval(warmCache, 3600000); // Every hour