Opinionated, caching, retrying fetch client for Node.js with enterprise-grade HTTP features
74
Configurable retry mechanism for handling transient network failures and server errors with exponential backoff and intelligent retry conditions.
Configure automatic request retries with detailed control over retry behavior and conditions.
/**
 * Retry configuration options accepted via the `retry` fetch option.
 * A plain number is shorthand for `{ retries: n }`; `false` is shorthand
 * for `{ retries: 0 }` (retries disabled).
 */
interface RetryOptions {
retries?: number; // Number of retry attempts after the initial request (default: 0 — no retries)
factor?: number; // Exponential backoff multiplier applied per attempt (default: 2)
minTimeout?: number; // Minimum delay before a retry, in ms (default: 1000)
maxTimeout?: number; // Upper cap on the retry delay, in ms (default: Infinity — uncapped)
randomize?: boolean; // Multiply each delay by a random factor in [1, 2) to add jitter (default: false)
}
/**
* Callback function called on each retry attempt
* @param {Error|Response} cause - Error or response that caused the retry
*/
type OnRetryCallback = (cause: Error | Response) => void;

Usage Examples:
const fetch = require('make-fetch-happen');
// Basic retry configuration
const response = await fetch('https://unreliable-api.example.com/data', {
retry: {
retries: 3,
factor: 2,
minTimeout: 1000,
maxTimeout: 10000,
randomize: true
}
});
// Simple retry count
const response2 = await fetch('https://api.example.com/data', {
retry: 5 // Equivalent to { retries: 5 }
});
// Disable retries
const response3 = await fetch('https://api.example.com/data', {
retry: false // Equivalent to { retries: 0 }
});
// With retry callback
const response4 = await fetch('https://flaky-api.example.com/data', {
retry: { retries: 3 },
onRetry: (cause) => {
if (cause instanceof Response) {
console.log(`Retrying due to ${cause.status} ${cause.statusText}`);
} else {
console.log(`Retrying due to error: ${cause.message}`);
}
}
});

Requests are automatically retried under specific conditions:
/**
* Retry conditions (all must be true):
* 1. Request method is NOT 'POST' (to avoid duplicate mutations)
* 2. Request does not have a streaming body
* 3. One of the following:
* a. Response status is 408, 420, 429, or >= 500
* b. Request failed with retriable error codes
* c. Request failed with retriable error types
*/
/**
 * Retriable error codes, compared against the failed request's `error.code`.
 * All represent transient network/socket conditions worth retrying.
 */
const RETRY_ERRORS = [
'ECONNRESET', // Remote socket closed unexpectedly mid-request
'ECONNREFUSED', // Remote host actively refused the connection
'EADDRINUSE', // Local address/port already in use
'ETIMEDOUT', // Socket-level operation timed out
'ECONNECTIONTIMEOUT', // Connection timeout (from @npmcli/agent)
'EIDLETIMEOUT', // Idle timeout (from @npmcli/agent)
'ERESPONSETIMEOUT', // Response timeout (from @npmcli/agent)
'ETRANSFERTIMEOUT' // Transfer timeout (from @npmcli/agent)
];
/**
 * Retriable error types — presumably compared against the error's `type`
 * property set by the fetch implementation (confirm against implementation).
 */
const RETRY_TYPES = [
'request-timeout' // Request timeout from fetch
];
/**
* Non-retriable conditions:
* - ENOTFOUND (DNS resolution failure - likely offline or bad hostname)
* - EINVALIDPROXY (Invalid proxy configuration)
* - EINVALIDRESPONSE (Invalid response from server)
* - All 2xx and 3xx response codes
* - 4xx response codes except 408, 420, 429
*/

Retry timing follows exponential backoff with optional randomization:
/**
* Backoff calculation:
* timeout = min(minTimeout * (factor ^ attempt), maxTimeout)
*
* If randomize is true:
* timeout = timeout * (1 + Math.random())
*
* Default values:
* - minTimeout: 1000ms
* - factor: 2
* - maxTimeout: Infinity
* - randomize: false
*/

Usage Examples:
// Custom backoff strategy
const response = await fetch('https://api.example.com/data', {
retry: {
retries: 5,
factor: 1.5, // Slower exponential growth
minTimeout: 2000, // Start with 2 second delay
maxTimeout: 30000, // Cap at 30 seconds
randomize: true // Add jitter to prevent thundering herd
}
});
// Backoff progression with above config:
// Attempt 1: 2000-4000ms (randomized)
// Attempt 2: 3000-6000ms (randomized)
// Attempt 3: 4500-9000ms (randomized)
// Attempt 4: 6750-13500ms (randomized)
// Attempt 5: 10125-20250ms (randomized; still below the 30000ms cap)

Monitor retry attempts and implement custom logic:
/**
* Retry callback receives the cause of the retry
* @param {Error|Response} cause - What triggered the retry
*/
type OnRetryCallback = (cause: Error | Response) => void;

Usage Examples:
let retryCount = 0;
const response = await fetch('https://api.example.com/data', {
retry: { retries: 3 },
onRetry: (cause) => {
retryCount++;
if (cause instanceof Response) {
console.log(`Retry ${retryCount}: Server returned ${cause.status}`);
// Log response headers for debugging
const rateLimitReset = cause.headers.get('x-rate-limit-reset');
if (rateLimitReset) {
console.log(`Rate limit resets at: ${new Date(rateLimitReset * 1000)}`);
}
} else {
console.log(`Retry ${retryCount}: Network error - ${cause.message}`);
console.log(`Error code: ${cause.code}`);
}
}
});

POST requests are handled specially to prevent accidental duplicate operations:
/**
* POST request retry behavior:
* - POST requests are never retried for response status codes
* - POST requests are only retried for network-level errors
* - This prevents accidental duplicate mutations on the server
*/

Usage Examples:
// GET request - will retry on 500 errors
const getData = await fetch('https://api.example.com/data', {
retry: { retries: 3 }
});
// POST request - will only retry on network errors, not 500 responses
const postData = await fetch('https://api.example.com/data', {
method: 'POST',
body: JSON.stringify({ action: 'create' }),
headers: { 'Content-Type': 'application/json' },
retry: { retries: 3 } // Only retries network errors for POST
});
// Use idempotent POST patterns for retries
const idempotentPost = await fetch('https://api.example.com/data', {
method: 'POST',
body: JSON.stringify({
idempotencyKey: 'unique-key-123',
action: 'create'
}),
headers: {
'Content-Type': 'application/json',
'Idempotency-Key': 'unique-key-123'
},
retry: { retries: 3 }
});

Common patterns for robust retry handling:
// Circuit breaker pattern: once a dependency has failed `threshold` times in a
// row, stop calling it for `timeout` ms instead of hammering it with retries.
class CircuitBreaker {
  failureCount: number; // Consecutive failures since the last success
  lastFailTime: number; // Timestamp (ms since epoch) of the most recent failure
  threshold: number; // Failure count at which the circuit opens
  timeout: number; // How long (ms) the circuit stays open after the last failure

  constructor(threshold = 5, timeout = 60000) {
    this.failureCount = 0;
    this.lastFailTime = 0;
    this.threshold = threshold;
    this.timeout = timeout;
  }

  /**
   * Run `fetchFn` through the breaker.
   * Throws immediately with 'Circuit breaker is open' while the circuit is
   * open; otherwise records success/failure and rethrows any rejection.
   */
  async call<T>(fetchFn: () => Promise<T>): Promise<T> {
    if (this.isOpen()) {
      throw new Error('Circuit breaker is open');
    }
    try {
      const result = await fetchFn();
      this.onSuccess();
      return result;
    } catch (error) {
      this.onFailure();
      throw error;
    }
  }

  /** True while the failure threshold is reached and the cool-down has not elapsed. */
  isOpen(): boolean {
    return this.failureCount >= this.threshold &&
      (Date.now() - this.lastFailTime) < this.timeout;
  }

  /** A successful call closes the circuit and clears the failure streak. */
  onSuccess(): void {
    this.failureCount = 0;
  }

  /** Record a failure; opens the circuit once `threshold` is reached. */
  onFailure(): void {
    this.failureCount++;
    this.lastFailTime = Date.now();
  }
}
// Usage with circuit breaker
const breaker = new CircuitBreaker();
const response = await breaker.call(() =>
fetch('https://api.example.com/data', {
retry: { retries: 2 }
})
);
// Conditional retry based on error type
const smartRetry = {
retries: 3,
onRetry: (cause) => {
if (cause instanceof Response && cause.status === 429) {
// For rate limiting, wait longer
const retryAfter = cause.headers.get('retry-after');
if (retryAfter) {
console.log(`Rate limited, retry after ${retryAfter} seconds`);
}
}
}
};
// Metrics collection
const metrics = { attempts: 0, retries: 0, failures: 0 };
const response = await fetch('https://api.example.com/data', {
retry: { retries: 3 },
onRetry: () => {
metrics.retries++;
}
});
metrics.attempts = 1 + metrics.retries;
if (!response.ok) {
metrics.failures++;
}

Install with Tessl CLI
npx tessl i tessl/npm-make-fetch-happen

evals
scenario-1
scenario-2
scenario-3
scenario-4
scenario-5
scenario-6
scenario-7
scenario-8
scenario-9