tessl install tessl/npm-cache-manager@7.2.0

Cache Manager for Node.js with support for multi-store caching, background refresh, and Keyv-compatible storage adapters.
Configure tiered caching for optimal performance and persistence.
Multi-store caching uses multiple storage layers (tiers) arranged by speed, fastest first (e.g. an in-memory L1 in front of a shared Redis L2).
Benefits: low read latency for hot keys served from memory, plus persistence and cross-instance sharing from the backing store.
import { createCache } from 'cache-manager';
import { Keyv } from 'keyv';
import KeyvRedis from '@keyv/redis';
import { CacheableMemory } from 'cacheable';
const cache = createCache({
stores: [
// L1: In-memory (fast, volatile)
new Keyv({
store: new CacheableMemory({
ttl: 60000, // 1 minute
lruSize: 1000, // Max 1000 items
}),
}),
// L2: Redis (persistent, shared)
new Keyv({
store: new KeyvRedis('redis://localhost:6379'),
}),
],
ttl: 300000, // 5 minutes default
});

On Get: stores are checked in order (L1 first); the first store that returns a value wins.
On Set: the value is written to every store.
On Delete: the key is removed from every store.
const cache = createCache({
stores: [
// L1: Short TTL (hot data only)
new Keyv({
store: new CacheableMemory({
ttl: 60000, // 1 minute
lruSize: 500 // Small cache
}),
}),
// L2: Long TTL (warm + cold data)
new Keyv({
store: new KeyvRedis('redis://localhost:6379'),
ttl: 3600000, // 1 hour
}),
],
ttl: 300000, // Default: 5 minutes
});

For read-heavy workloads with eventual consistency:
const cache = createCache({
stores: [memoryStore, redisStore],
nonBlocking: true,
});

Behavior: reads use Promise.race() across all stores and return the fastest response; writes are fired without being awaited.

const cache = createCache({
stores: [memoryStore, redisStore],
refreshAllStores: false, // Default
});

With refreshAllStores: false, a background refresh updates only the store where the value was found.
With refreshAllStores: true, a background refresh writes the refreshed value to every store.
For enterprise applications:
const cache = createCache({
stores: [
// L1: Very fast, very small (100-500 items)
new Keyv({
store: new CacheableMemory({
ttl: 30000, // 30 seconds
lruSize: 200
}),
}),
// L2: Fast, medium size (shared across instances)
new Keyv({
store: new KeyvRedis('redis://fast-redis:6379'),
ttl: 600000, // 10 minutes
}),
// L3: Slower, large (long-term storage)
new Keyv({
store: new KeyvRedis('redis://slow-redis:6380'),
ttl: 86400000, // 24 hours
}),
],
});

Track which tier serves requests:
const tierMetrics = {
l1Hits: 0,
l2Hits: 0,
misses: 0,
};
cache.on('get', ({ value, store }) => {
if (value === undefined) {
tierMetrics.misses++;
} else if (store === 'primary') {
tierMetrics.l1Hits++;
} else if (store?.startsWith('secondary')) {
tierMetrics.l2Hits++;
}
});
// Check effectiveness
console.log({
l1HitRate: tierMetrics.l1Hits / (tierMetrics.l1Hits + tierMetrics.l2Hits + tierMetrics.misses),
l2HitRate: tierMetrics.l2Hits / (tierMetrics.l1Hits + tierMetrics.l2Hits + tierMetrics.misses),
});

// US region
const usCache = createCache({
stores: [
new Keyv({ store: new CacheableMemory({ lruSize: 500 }) }),
new Keyv({ store: new KeyvRedis('redis://us-east-1:6379') }),
],
});
// EU region
const euCache = createCache({
stores: [
new Keyv({ store: new CacheableMemory({ lruSize: 500 }) }),
new Keyv({ store: new KeyvRedis('redis://eu-west-1:6379') }),
],
});

// User data: Fast, short TTL
const userCache = createCache({
stores: [
new Keyv({ store: new CacheableMemory({ ttl: 60000, lruSize: 1000 }) }),
new Keyv({ store: new KeyvRedis('redis://localhost:6379'), ttl: 300000 }),
],
});
// Product catalog: Slower, long TTL
const productCache = createCache({
stores: [
new Keyv({ store: new CacheableMemory({ ttl: 300000, lruSize: 500 }) }),
new Keyv({ store: new KeyvRedis('redis://localhost:6379'), ttl: 3600000 }),
],
});

// Calculate based on average item size and memory budget
const avgItemSize = 1024; // 1KB per item
const memoryBudget = 50 * 1024 * 1024; // 50MB
const lruSize = Math.floor(memoryBudget / avgItemSize);
const cache = createCache({
stores: [
new Keyv({
store: new CacheableMemory({
lruSize,
ttl: 60000
}),
}),
// ... other stores
],
});

import Redis from 'ioredis';
import KeyvRedis from '@keyv/redis';
// Redis with connection pool
const redis = new Redis({
host: 'localhost',
port: 6379,
maxRetriesPerRequest: 3,
enableOfflineQueue: false,
lazyConnect: true,
});
const cache = createCache({
stores: [
memoryStore,
new Keyv({ store: new KeyvRedis(redis) }),
],
});

Problem: low L1 (in-memory) hit rate.
Cause: L1 TTL too short or LRU size too small.
Solution:
// Increase TTL and LRU size
new CacheableMemory({
ttl: 300000, // 5 minutes instead of 1
lruSize: 2000 // 2000 items instead of 500
})

Problem: stale or inconsistent values across stores.
Cause: Using nonBlocking: true or refreshAllStores: false.
Solution:
const cache = createCache({
stores: [memoryStore, redisStore],
nonBlocking: false, // Wait for all stores
refreshAllStores: true, // Refresh all stores
});

Problem: elevated read latency (presumably the section heading lost in extraction — verify against the original page).
Cause: L1 too small, many requests hitting L2.
Solution: