Storage higher-order operation for AWS S3 with multipart upload support
npx @tessl/cli install tessl/npm-aws-sdk--lib-storage@3.879.0

AWS SDK Storage Utilities provides high-level storage operations for AWS S3, specifically focusing on efficient multipart uploads of large files, buffers, blobs, or streams. The library abstracts the complexity of S3's multipart upload API by providing a configurable Upload class that automatically handles chunking, concurrency control, progress tracking, and error recovery.
npm install @aws-sdk/lib-storage @aws-sdk/client-s3

import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";

For CommonJS:

const { Upload } = require("@aws-sdk/lib-storage");
const { S3Client } = require("@aws-sdk/client-s3");

import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
// Create S3 client
const client = new S3Client({ region: "us-east-1" });
// Create upload with basic configuration
const upload = new Upload({
  client,
  params: {
    Bucket: "my-bucket",
    Key: "my-file.txt",
    Body: "Hello, World!"
  }
});
// Execute upload
const result = await upload.done();
console.log("Upload completed:", result.Location);The AWS SDK Storage Utilities is built around several key components:
Core functionality for handling large file uploads to S3 with automatic multipart upload management, progress tracking, and error recovery.
/**
 * Main class for handling multipart uploads to S3
 * Extends EventEmitter to provide progress tracking capabilities
 */
class Upload extends EventEmitter {
  /**
   * Create a new Upload instance
   * @param options - Configuration options for the upload
   */
  constructor(options: Options);

  /**
   * Execute the upload and return results
   * @returns Promise resolving to S3 upload result
   */
  done(): Promise<CompleteMultipartUploadCommandOutput>;

  /**
   * Abort the upload and clean up any in-progress multipart upload
   * @returns Promise that resolves when abort is complete
   */
  abort(): Promise<void>;

  /**
   * Listen for upload progress events
   * @param event - Event name, must be "httpUploadProgress"
   * @param listener - Function to handle progress events
   * @returns This Upload instance for chaining
   */
  on(event: "httpUploadProgress", listener: (progress: Progress) => void): this;

  /** Last UploadId if multipart upload was used (readonly) */
  readonly uploadId?: string;

  /** Event name for upload progress tracking (readonly) */
  readonly uploadEvent?: string;
}

Usage Examples:
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";
import * as fs from "fs";
// Upload a file from the file system
const fileStream = fs.createReadStream("large-file.zip");
const upload = new Upload({
  client: new S3Client({ region: "us-west-2" }),
  params: {
    Bucket: "my-uploads",
    Key: "files/large-file.zip",
    Body: fileStream,
    ContentType: "application/zip"
  },
  // Optional: configure upload behavior
  queueSize: 4, // Number of concurrent part uploads
  partSize: 1024 * 1024 * 10, // 10MB parts
  leavePartsOnError: false // Abort and clean up parts if the upload fails
});

// Track upload progress
upload.on("httpUploadProgress", (progress) => {
  console.log(`Progress: ${progress.loaded}/${progress.total} bytes`);
  console.log(`Part ${progress.part} uploading`);
});

try {
  const result = await upload.done();
  console.log("Upload successful:", result.Location);
} catch (error) {
  console.error("Upload failed:", error);
}

// Upload with tags and custom configuration
const upload = new Upload({
  client: new S3Client({}),
  params: {
    Bucket: "my-bucket",
    Key: "documents/report.pdf",
    Body: pdfBuffer,
    ServerSideEncryption: "AES256"
  },
  tags: [
    { Key: "Department", Value: "Finance" },
    { Key: "Project", Value: "Q4-Report" }
  ],
  queueSize: 8, // Higher concurrency
  partSize: 1024 * 1024 * 50 // 50MB parts for very large files
});

const result = await upload.done();
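When leavePartsOnError is enabled, a failed upload leaves its completed parts in S3, and the uploadId property identifies the multipart upload so it can be cleaned up (or resumed) manually. The following is a minimal sketch, assuming a Node.js file stream; the bucket, key, and file path are placeholders, and the manual cleanup uses AbortMultipartUploadCommand from @aws-sdk/client-s3:

import { createReadStream } from "fs";
import { S3Client, AbortMultipartUploadCommand } from "@aws-sdk/client-s3";
import { Upload } from "@aws-sdk/lib-storage";

// Keep parts on failure so they can be inspected or cleaned up manually
const client = new S3Client({});
const params = {
  Bucket: "my-bucket",
  Key: "videos/raw-footage.mov",
  Body: createReadStream("./raw-footage.mov")
};
const upload = new Upload({ client, params, leavePartsOnError: true });

try {
  await upload.done();
} catch (error) {
  console.error("Upload failed:", error);
  // uploadId is populated once a multipart upload has been created
  if (upload.uploadId) {
    await client.send(new AbortMultipartUploadCommand({
      Bucket: params.Bucket,
      Key: params.Key,
      UploadId: upload.uploadId
    }));
  }
}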
// Upload with external abort controller
const abortController = new AbortController();
const upload = new Upload({
  client: new S3Client({}),
  params: { Bucket: "bucket", Key: "key", Body: data },
  abortController
});

// Abort after 30 seconds
setTimeout(() => {
  abortController.abort();
}, 30000);

try {
  await upload.done();
} catch (error) {
  if (error.name === "AbortError") {
    console.log("Upload was cancelled");
  }
}
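The same cancellation can also be triggered from the Upload instance itself with the abort() method declared above, without creating an AbortController. A minimal sketch; the file path, bucket, key, and 5-second timer are placeholders standing in for real application logic:

import { createReadStream } from "fs";
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";

const upload = new Upload({
  client: new S3Client({}),
  params: { Bucket: "bucket", Key: "key", Body: createReadStream("./large-file.zip") }
});

// Cancel the upload from application code, e.g. on user action
setTimeout(() => {
  void upload.abort();
}, 5000);

try {
  await upload.done();
} catch (error) {
  // done() rejects once the upload has been aborted
  console.log("Upload was cancelled:", error.name);
}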
/**
 * Configuration options for the Upload constructor
 */
interface Options extends Partial<Configuration> {
  /** S3 upload parameters (required) */
  params: PutObjectCommandInput;
  /** AWS S3 client instance (required) */
  client: S3Client;
}

/**
 * Complete upload configuration interface
 */
interface Configuration {
  /**
   * Number of parts to upload concurrently
   * Higher values increase throughput but use more memory
   * @default 4
   */
  queueSize: number;

  /**
   * Size of each part in bytes, minimum 5 MB (5242880 bytes)
   * Larger parts reduce API calls but use more memory
   * @default 5242880 (5 MB)
   */
  partSize: number;

  /**
   * Whether to leave already-uploaded parts in S3 when the upload fails
   * Set to true to skip automatic cleanup and handle it manually
   * @default false
   */
  leavePartsOnError: boolean;

  /**
   * Tags to apply to the uploaded object
   * Applied after successful upload completion
   * @default []
   */
  tags: Tag[];

  /**
   * External abort controller for cancelling uploads
   * Allows coordination with other async operations
   */
  abortController?: AbortController;
}
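S3 multipart uploads are limited to 10,000 parts, so the default 5 MB partSize caps a single object at roughly 48.8 GiB; larger objects need a proportionally larger partSize. The following is a minimal sketch of deriving a part size from a known file size; the choosePartSize helper and the file path are illustrative, not part of the library:

import { createReadStream, statSync } from "fs";
import { Upload } from "@aws-sdk/lib-storage";
import { S3Client } from "@aws-sdk/client-s3";

// S3 limits: at most 10,000 parts per upload, 5 MB (5,242,880 bytes) minimum part size
const MIN_PART_SIZE = 5 * 1024 * 1024;
const MAX_PARTS = 10_000;

// Smallest part size that keeps the object within 10,000 parts
function choosePartSize(totalBytes: number): number {
  return Math.max(MIN_PART_SIZE, Math.ceil(totalBytes / MAX_PARTS));
}

const filePath = "./very-large-file.bin"; // illustrative path
const totalBytes = statSync(filePath).size; // e.g. a 100 GiB file needs parts just over 10 MiB

const upload = new Upload({
  client: new S3Client({}),
  params: { Bucket: "my-bucket", Key: "very-large-file.bin", Body: createReadStream(filePath) },
  partSize: choosePartSize(totalBytes),
  queueSize: 4
});

await upload.done();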
Real-time upload progress information provided through event-based monitoring.

/**
 * Upload progress information emitted during upload process
 */
interface Progress {
  /** Number of bytes uploaded so far */
  loaded?: number;
  /** Total number of bytes to upload (may be undefined for streams) */
  total?: number;
  /** Current part number being uploaded */
  part?: number;
  /** S3 object key being uploaded */
  Key?: string;
  /** S3 bucket name being uploaded to */
  Bucket?: string;
}

Usage Example:
upload.on("httpUploadProgress", (progress) => {
  if (progress.total) {
    const percentage = Math.round((progress.loaded! / progress.total) * 100);
    console.log(`Upload ${percentage}% complete`);
    console.log(`Part ${progress.part}: ${progress.loaded}/${progress.total} bytes`);
  } else {
    console.log(`Part ${progress.part}: ${progress.loaded} bytes uploaded`);
  }
});

Comprehensive support for various data input types with automatic chunking and streaming capabilities.
/**
 * Union type for all supported upload data types
 * Equivalent to PutObjectCommandInput["Body"]
 */
type BodyDataTypes = string | Uint8Array | Buffer | Readable | ReadableStream | Blob;

Supported Data Types:

- string: plain text content
- Uint8Array / Buffer: binary data held in memory
- Readable: Node.js readable streams (for example, fs.createReadStream)
- ReadableStream: Web Streams API readable streams
- Blob: browser Blob and File objects

Usage Examples:
// String data
const stringUpload = new Upload({
  client,
  params: { Bucket: "bucket", Key: "text.txt", Body: "Hello, World!" }
});

// Buffer data
const buffer = Buffer.from("binary data", "utf8");
const bufferUpload = new Upload({
  client,
  params: { Bucket: "bucket", Key: "data.bin", Body: buffer }
});

// File stream (Node.js)
import * as fs from "fs";
const fileStream = fs.createReadStream("./large-file.zip");
const streamUpload = new Upload({
  client,
  params: { Bucket: "bucket", Key: "file.zip", Body: fileStream }
});

// Blob (Browser)
const fileInput = document.querySelector('input[type="file"]') as HTMLInputElement;
const file = fileInput.files![0];
const blobUpload = new Upload({
  client,
  params: { Bucket: "bucket", Key: file.name, Body: file }
});
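The examples above cover strings, Buffers, Node.js streams, and Blobs; a web ReadableStream can be passed the same way. The sketch below reuses the client from the examples above and streams a fetch response body into S3 (the URL and key are placeholders):

// ReadableStream (Web Streams API), e.g. a streaming fetch response body
const response = await fetch("https://example.com/source-file.bin");
if (response.body) {
  const webStreamUpload = new Upload({
    client,
    params: { Bucket: "bucket", Key: "source-file.bin", Body: response.body }
  });
  await webStreamUpload.done();
}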
/**
 * AWS S3 CompleteMultipartUploadCommand output
 * Extended from @aws-sdk/client-s3
 */
interface CompleteMultipartUploadCommandOutput {
  /** S3 object location URL */
  Location?: string;
  /** S3 bucket name */
  Bucket?: string;
  /** S3 object key */
  Key?: string;
  /** Entity tag for the uploaded object */
  ETag?: string;
  /** Server-side encryption details */
  ServerSideEncryption?: string;
  /** Version ID if bucket versioning is enabled */
  VersionId?: string;
  /** Additional metadata fields from S3 response */
  [key: string]: any;
}
/**
 * AWS S3 PutObjectCommandInput
 * Extended from @aws-sdk/client-s3
 */
interface PutObjectCommandInput {
  /** S3 bucket name (required) */
  Bucket: string;
  /** S3 object key (required) */
  Key: string;
  /** Data to upload (required) */
  Body: BodyDataTypes;
  /** MIME type of the object */
  ContentType?: string;
  /** Content encoding (e.g., gzip) */
  ContentEncoding?: string;
  /** Cache control header */
  CacheControl?: string;
  /** Content disposition header */
  ContentDisposition?: string;
  /** Server-side encryption settings */
  ServerSideEncryption?: string;
  /** Additional S3 parameters */
  [key: string]: any;
}
/**
 * AWS S3 Tag interface
 * Extended from @aws-sdk/client-s3
 */
interface Tag {
  /** Tag key */
  Key: string;
  /** Tag value */
  Value: string;
}

/**
 * Raw data part interface used internally for upload processing
 * Exported as part of the public API
 */
interface RawDataPart {
  /** Part number for multipart uploads */
  partNumber: number;
  /** Data content for this part */
  data: BodyDataTypes;
  /** Whether this is the final part of the upload */
  lastPart?: boolean;
}
/**
 * AWS S3 client from @aws-sdk/client-s3
 */
declare class S3Client {
  constructor(configuration?: any);
  send(command: any): Promise<any>;
  config: {
    requestHandler?: any;
    endpoint?: () => Promise<any>;
    forcePathStyle?: boolean;
    requestChecksumCalculation?: () => Promise<string>;
  };
}

The Upload class provides comprehensive error handling with automatic cleanup and detailed error information:
try {
  const result = await upload.done();
} catch (error) {
  if (error.name === "AbortError") {
    console.log("Upload was cancelled");
  } else if (error.message.includes("EntityTooSmall")) {
    console.log("Part size is too small (minimum 5MB)");
  } else if (error.message.includes("Exceeded") && error.message.includes("parts")) {
    console.log("File is too large (maximum 10,000 parts)");
  } else {
    console.error("Upload failed:", error.message);
  }
}

Common Error Scenarios: