The AWS SDK provides a comprehensive request/response system with event-driven lifecycle management, support for callbacks, promises, streaming, pagination, and robust error handling.
// Declaration of the SDK request object returned by every service operation
// call (e.g. s3.getObject(...)). The HTTP request is NOT sent until send(),
// promise(), or createReadStream() is invoked, so listeners can be attached first.
class Request<TData = any> {
constructor(service: Service, operation: string, params?: any);
// Core properties
service: Service;
operation: string;
params: any;
httpRequest: HttpRequest;
startTime: Date;
response: Response<TData>;
// Execution methods
send(callback?: RequestCallback<TData>): void;
// NOTE(review): in SDK v2, promise() resolves with the operation's data
// (carrying a $response property), not the Response wrapper — confirm
// against the official AWS.Request#promise documentation.
promise(): Promise<Response<TData>>;
abort(): void;
// Streaming (Node.js only)
createReadStream(): NodeJS.ReadableStream;
// Pagination
eachPage(callback: EachPageCallback): void;
eachItem(callback: EachItemCallback): void;
// Event handling (each returns this request for chaining)
on(event: string, listener: Function): Request<TData>;
onAsync(event: string, listener: Function): Request<TData>;
removeListener(event: string, listener: Function): Request<TData>;
removeAllListeners(event?: string): Request<TData>;
// State management
isPageable(): boolean;
hasNextPage(): boolean;
nextPage(callback?: RequestCallback): Request;
}
// Node-style completion callback passed to send() and operation methods:
// exactly one of err/data is meaningful per invocation.
type RequestCallback<TData = any> = (err: AWSError | null, data?: TData) => void;
// Per-page callback for Request.eachPage(); invoke done() to fetch the next page.
type EachPageCallback = (err: AWSError | null, data?: any, done?: () => void) => void;
// Per-item callback for Request.eachItem(); invoke done() to continue iteration.
type EachItemCallback = (err: AWSError | null, item?: any, done?: () => void) => void;

Callback Style:
// Classic Node-style callback: err is set on failure, data on success.
const params = { Bucket: 'my-bucket', Key: 'my-file.txt' };
s3.getObject(params, (err, data) => {
  if (err) {
    console.error('Error:', err);
  } else {
    console.log('Success:', data);
  }
});

Promise Style:
const params = { Bucket: 'my-bucket', Key: 'my-file.txt' };
// promise() sends the request and returns a native Promise.
s3.getObject(params).promise()
  .then(data => {
    console.log('Success:', data);
  })
  .catch(err => {
    console.error('Error:', err);
  });

// With async/await (must run inside an async function, or an ES module
// with top-level await enabled)
try {
  const data = await s3.getObject(params).promise();
  console.log('Success:', data);
} catch (err) {
  console.error('Error:', err);
}

Request Object:
// Operation methods return a Request immediately; nothing is transmitted
// until send() is called, so event listeners can be registered first.
const request = s3.getObject(params);
// Add event listeners
request.on('success', (response) => {
  console.log('Request succeeded:', response.data);
});
request.on('error', (error) => {
  console.error('Request failed:', error);
});
// Send the request
request.send();

Reading Stream (Node.js):
const params = { Bucket: 'my-bucket', Key: 'large-file.zip' };
// createReadStream() sends the request and exposes the response body as a
// Node readable stream — avoids buffering large objects in memory.
const stream = s3.getObject(params).createReadStream();
stream.on('data', (chunk) => {
  console.log('Received chunk:', chunk.length);
});
stream.on('end', () => {
  console.log('Stream ended');
});
// Always handle 'error': request failures are emitted here, not thrown.
stream.on('error', (err) => {
  console.error('Stream error:', err);
});

Upload Stream:
const fs = require('fs');
const fileStream = fs.createReadStream('local-file.txt');
const params = {
  Bucket: 'my-bucket',
  Key: 'uploaded-file.txt',
  Body: fileStream // Body accepts a readable stream here
};
s3.upload(params, (err, data) => {
  if (err) console.error(err);
  else console.log('Upload successful:', data);
});

// Response object produced for every completed request (available as
// request.response and as the argument to lifecycle event listeners).
class Response<TData = any> {
// Core properties
data: TData;
error?: AWSError;
requestId: string;
extendedRequestId?: string;
cfId?: string;
retryCount: number;
redirectCount: number;
httpResponse: HttpResponse;
// Pagination
hasNextPage(): boolean;
nextPage(callback?: RequestCallback): Request;
}

Response Properties:
s3.listObjects({ Bucket: 'my-bucket' }, (err, data) => {
  if (err) {
    console.error('Error:', err);
  } else {
    // ResponseMetadata carries the AWS request id — useful in support cases.
    console.log('Request ID:', data.ResponseMetadata?.RequestId);
    console.log('Objects:', data.Contents);
  }
});

// Error shape delivered to callbacks and 'error' listeners.
interface AWSError extends Error {
// Core error properties
name: string;
message: string;
code: string; // service/SDK error code, e.g. 'NoSuchKey'
time: Date; // when the error occurred
// Request context
requestId?: string;
extendedRequestId?: string;
cfId?: string;
statusCode?: number;
// Retry information
retryable?: boolean; // whether the SDK considers this error retryable
retryDelay?: number; // delay in ms before the next retry attempt
// Service-specific
region?: string;
hostname?: string;
// Original error
originalError?: Error;
}

Error Handling Examples:
s3.getObject(params, (err, data) => {
  if (err) {
    console.log('Error Code:', err.code);
    console.log('Status Code:', err.statusCode);
    console.log('Request ID:', err.requestId);
    console.log('Retryable:', err.retryable);
    // Branch on the service error code for specific handling
    if (err.code === 'NoSuchKey') {
      console.log('File does not exist');
    } else if (err.code === 'AccessDenied') {
      console.log('Permission denied');
    }
  }
});

// Page through all results
s3.listObjects({ Bucket: 'my-bucket' }).eachPage((err, data, done) => {
  if (err) {
    console.error('Error:', err);
    return done();
  }
  // guard: data may be absent on the final invocation — TODO confirm
  if (data) {
    console.log('Page:', data.Contents?.length, 'objects');
    data.Contents?.forEach(obj => {
      console.log('Object:', obj.Key);
    });
  }
  // Continue to next page (or stop by not calling done())
  done();
});
// Iterate through individual items
s3.listObjects({ Bucket: 'my-bucket' }).eachItem((err, item, done) => {
  if (err) {
    console.error('Error:', err);
    return done();
  }
  if (item) {
    console.log('Item:', item.Key);
  }
  done();
});

// Manual pagination: accumulate every object in a bucket by following
// ListObjectsV2 continuation tokens, then invoke callback(null, allObjects)
// or callback(err) on the first failure.
function listAllObjects(bucket, callback) {
  const allObjects = [];
  function listPage(nextToken) {
    const params = {
      Bucket: bucket,
      ContinuationToken: nextToken // undefined on the first request
    };
    s3.listObjectsV2(params, (err, data) => {
      if (err) return callback(err);
      allObjects.push(...(data.Contents || [])); // parenthesized for clarity
      if (data.IsTruncated) {
        // Get next page
        listPage(data.NextContinuationToken);
      } else {
        // All pages retrieved
        callback(null, allObjects);
      }
    });
  }
  listPage();
}

// Request lifecycle events (in order)
const requestEvents = [
  'validate', // Parameter validation
  'build', // Request building
  'afterBuild', // Post-build customization
  'sign', // Request signing
  'send', // HTTP transmission
  'validateResponse', // Response validation
  'extractData', // Data extraction
  'success', // Successful completion
  'error', // Error occurred
  'complete' // Request lifecycle complete
];
// Streaming events
const streamingEvents = [
  'httpData', // HTTP data received
  'httpUploadProgress', // Upload progress
  'httpDownloadProgress', // Download progress
  'httpDone', // HTTP completion
  'retry' // Retry attempt
];

Request-Level Events:
const request = s3.getObject(params);
// Hooks fire in lifecycle order: validate → build → sign → send →
// validateResponse → extractData → success/error → complete.
request.on('build', (req) => {
  console.log('Building request:', req.operation);
});
request.on('sign', (req) => {
  console.log('Signing request with:', req.service.config.credentials?.accessKeyId);
});
request.on('send', (resp) => {
  console.log('Sending request to:', resp.request.httpRequest.endpoint.href);
});
request.on('httpData', (chunk, resp) => {
  console.log('Received data chunk:', chunk.length, 'bytes');
});
request.on('success', (resp) => {
  console.log('Request successful:', resp.requestId);
});
request.on('error', (err, resp) => {
  console.log('Request failed:', err.code);
});
request.on('complete', (resp) => {
  // 'complete' fires after both success and error, so timing covers both paths.
  console.log('Request completed in:', Date.now() - request.startTime.getTime(), 'ms');
});
request.send();

Global Events:
// Listen to all requests across all services
AWS.events.on('send', (resp) => {
  // Stash a custom timestamp so the 'complete' listener can compute duration.
  resp.startTime = Date.now();
});
AWS.events.on('complete', (resp) => {
  const duration = Date.now() - resp.startTime;
  console.log(`${resp.request.service.serviceIdentifier}.${resp.request.operation} took ${duration}ms`);
});

// Upload progress reporting
const params = {
  Bucket: 'my-bucket',
  Key: 'large-file.zip',
  Body: fs.createReadStream('large-file.zip')
};
const request = s3.upload(params);
request.on('httpUploadProgress', (progress) => {
  console.log(`Upload progress: ${progress.loaded}/${progress.total} bytes`);
  console.log(`${Math.round(progress.loaded / progress.total * 100)}% complete`);
});
request.send((err, data) => {
  if (err) console.error(err);
  else console.log('Upload complete:', data.Location);
});

// Download progress reporting
const request = s3.getObject(params);
request.on('httpDownloadProgress', (progress) => {
  console.log(`Download progress: ${progress.loaded}/${progress.total} bytes`);
});
request.send((err, data) => {
  if (err) console.error(err);
  else console.log('Download complete:', data.Body.length, 'bytes');
});

// Aborting an in-flight request
const request = s3.getObject({
  Bucket: 'my-bucket',
  Key: 'large-file.zip'
});
// Abort after 5 seconds
setTimeout(() => {
  request.abort();
}, 5000);
request.on('error', (err) => {
  // abort() surfaces as an error with code 'RequestAbortedError'
  if (err.code === 'RequestAbortedError') {
    console.log('Request was aborted');
  }
});
request.send();

// Configure retries globally
AWS.config.update({
  maxRetries: 5,
  retryDelayOptions: {
    customBackoff: function(retryCount, err) {
      // Exponential backoff with jitter; returns the delay in milliseconds.
      return Math.pow(2, retryCount) * 100 + Math.random() * 100;
    }
  }
});
// Per-service configuration
const s3 = new AWS.S3({
  maxRetries: 10,
  retryDelayOptions: {
    base: 300 // Base delay of 300ms
  }
});

// Observe individual retry attempts on a request
const request = s3.getObject(params);
request.on('retry', (resp) => {
  console.log('Retrying request:', resp.retryCount);
  console.log('Retry delay:', resp.error?.retryDelay, 'ms');
});
request.send();

// Wait for resource to reach desired state
// Each service client exposes the waiter entry point with this signature
// (shown as a comment — the original bare signature line was not valid code):
//   waitFor(state: string, params?: any, callback?: Callback<any>): Request<any>
// Common waiter states
const waiterStates = [
  'bucketExists', // S3 bucket exists
  'bucketNotExists', // S3 bucket does not exist
  'objectExists', // S3 object exists
  'objectNotExists', // S3 object does not exist
  'instanceRunning', // EC2 instance is running
  'instanceStopped', // EC2 instance is stopped
  'instanceTerminated', // EC2 instance is terminated
  'snapshotCompleted', // EBS snapshot completed
  'imageAvailable', // AMI is available
  'loadBalancerAvailable', // ELB is available
  'stackCreateComplete', // CloudFormation stack created
  'stackDeleteComplete', // CloudFormation stack deleted
  'stackUpdateComplete' // CloudFormation stack updated
];

Waiter Examples:
// Wait for S3 bucket to exist
s3.waitFor('bucketExists', { Bucket: 'my-bucket' }, (err, data) => {
  if (err) console.log('Bucket does not exist:', err);
  else console.log('Bucket exists:', data);
});
// Wait for EC2 instance to be running
ec2.waitFor('instanceRunning', { InstanceIds: ['i-1234567890abcdef0'] }, (err, data) => {
  if (err) console.log('Instance not running:', err);
  else console.log('Instance is running:', data);
});
// Custom waiter configuration. SDK v2's waitFor takes no separate options
// argument — per-call waiter settings are passed inside params under the
// special $waiter key (a third positional object would land in the
// callback slot and be ignored).
s3.waitFor('objectExists', {
  Bucket: 'my-bucket',
  Key: 'my-file.txt',
  $waiter: {
    delay: 15, // Check every 15 seconds
    maxAttempts: 40 // Maximum 40 attempts (10 minutes)
  }
}, (err, data) => {
  if (err) console.log('Object does not exist after waiting:', err);
  else console.log('Object exists:', data);
});