High-performance Amazon S3 client with file operations, presigned URLs, streaming support, and seamless integration with Bun's native APIs for cloud storage operations.
import { S3Client } from "bun";

Create and manage S3 client instances with credential management and bucket configuration.
/**
* High-performance S3 client for cloud storage operations
*/
class S3Client {
/**
* Create a new S3 client instance with credential management
* @param options - Default S3 options for all operations
*/
constructor(options?: S3Options);
/**
* Creates an S3File instance for the given path
* @param path - The path to the file in the bucket
* @param options - Additional S3 options to override defaults
* @returns S3File instance for file operations
*/
file(path: string, options?: S3Options): S3File;
/**
* Static method to create an S3File instance
* @param path - The path to the file in the bucket
* @param options - S3 credentials and configuration options
* @returns S3File instance for file operations
*/
static file(path: string, options?: S3Options): S3File;
}
interface S3Options extends BlobPropertyBag {
/** AWS Access Key ID */
accessKeyId?: string;
/** AWS Secret Access Key */
secretAccessKey?: string;
/** AWS Session Token (for temporary credentials) */
sessionToken?: string;
/** S3 bucket name */
bucket?: string;
/** S3 endpoint URL */
endpoint?: string;
/** AWS region */
region?: string;
/** Access Control List policy */
acl?: "private" | "public-read" | "public-read-write" | "authenticated-read" | "aws-exec-read" | "bucket-owner-read" | "bucket-owner-full-control";
}

Usage Examples:
import { S3Client } from "bun";
// Create client with credentials
const s3 = new S3Client({
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
bucket: "my-bucket",
region: "us-east-1"
});
// Create client with custom endpoint (for MinIO, DigitalOcean Spaces, etc.)
const spaces = new S3Client({
accessKeyId: "your-key",
secretAccessKey: "your-secret",
bucket: "my-space",
endpoint: "https://nyc3.digitaloceanspaces.com",
region: "us-east-1"
});
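The static file() method declared above is useful for one-off access when you don't want to hold a client instance; a minimal sketch, with placeholder credentials, bucket, and path, passing everything per call:

import { S3Client } from "bun";
// Build an S3File in a single step; credentials are supplied per call
// (placeholder values shown).
const report = S3Client.file("reports/q1.csv", {
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  bucket: "my-bucket",
  region: "us-east-1"
});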
Direct file operations including writing, reading, and managing files in S3 buckets.

/**
* Write data directly to a path in the bucket
* @param path - The path to the file in the bucket
* @param data - The data to write (string, buffer, stream, or web API types)
* @param options - Additional S3 options to override defaults
* @returns Promise that resolves with the number of bytes written
*/
write(
path: string,
data: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer | Request | Response | BunFile | S3File | Blob | File,
options?: S3Options
): Promise<number>;
/**
* Static method to write data directly to a path
* @param path - The path to the file in the bucket
* @param data - The data to write
* @param options - S3 credentials and configuration options
* @returns Promise that resolves with the number of bytes written
*/
static write(
path: string,
data: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer | Request | Response | BunFile | S3File | Blob | File,
options?: S3Options
): Promise<number>;
/**
* Generate a presigned URL for temporary access to a file
* @param path - The path to the file in the bucket
* @param options - Presign options including expiration time
* @returns Presigned URL string
*/
presign(path: string, options?: S3FilePresignOptions): string;
/**
* Delete a file from the bucket
* @param path - The path to the file to delete
* @param options - Additional S3 options
* @returns Promise that resolves when the file is deleted
*/
delete(path: string, options?: S3Options): Promise<void>;
/**
* Alias for delete() method
* @param path - The path to the file to delete
* @param options - Additional S3 options
* @returns Promise that resolves when the file is deleted
*/
unlink(path: string, options?: S3Options): Promise<void>;
/**
* Get the size of a file in bytes
* @param path - The path to the file
* @param options - Additional S3 options
* @returns Promise that resolves with the file size
*/
size(path: string, options?: S3Options): Promise<number>;
/**
* Check if a file exists
* @param path - The path to the file
* @param options - Additional S3 options
* @returns Promise that resolves with true if file exists
*/
exists(path: string, options?: S3Options): Promise<boolean>;
/**
* Get file metadata and statistics
* @param path - The path to the file
* @param options - Additional S3 options
* @returns Promise that resolves with file statistics
*/
stat(path: string, options?: S3Options): Promise<S3Stats>;
interface S3FilePresignOptions extends S3Options {
/** Number of seconds until the presigned URL expires @default 86400 */
expiresIn?: number;
}
interface S3Stats {
size: number;
lastModified: Date;
etag: string;
type: string;
}

Usage Examples:
const s3 = new S3Client({
accessKeyId: "your-key",
secretAccessKey: "your-secret",
bucket: "my-bucket"
});
// Write different types of data
await s3.write("hello.txt", "Hello World!");
await s3.write("data.json", JSON.stringify({hello: "world"}), {
type: "application/json"
});
// Write from fetch response
const response = await fetch("https://example.com/image.jpg");
await s3.write("images/photo.jpg", response, {
acl: "public-read"
});
// File operations
const exists = await s3.exists("hello.txt"); // true
const size = await s3.size("hello.txt"); // 12
const stats = await s3.stat("hello.txt"); // Full metadata
// Generate presigned URL
const url = s3.presign("private-file.pdf", {
expiresIn: 3600 // 1 hour
});
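// A presigned URL is a plain HTTPS URL, so any HTTP client can consume it
// until it expires (sketch; assumes the URL is still valid):
const res = await fetch(url);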
// Delete file
await s3.delete("old-file.txt");
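The static write() variant accepts the same data types plus credentials, so a one-off upload needs no client instance; a minimal sketch with placeholder values:

import { S3Client } from "bun";
// One-off upload; credentials are passed per call (placeholders shown).
await S3Client.write("backups/snapshot.json", JSON.stringify({ ok: true }), {
  accessKeyId: process.env.AWS_ACCESS_KEY_ID,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
  bucket: "my-bucket"
});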
S3File extends the Blob interface for seamless integration with web APIs and provides S3-specific operations.

/**
* Represents a file in an S3-compatible storage service
* Extends the Blob interface for compatibility with web APIs
*/
interface S3File extends Blob {
/** The size of the file in bytes (requires a network request) */
readonly size: Promise<number>;
/**
* Create a new S3File representing a slice of this file
* @param begin - Start byte position
* @param end - End byte position
* @param contentType - MIME type for the slice
* @returns New S3File instance representing the slice
*/
slice(begin?: number, end?: number, contentType?: string): S3File;
/**
* Create a writable stream for uploading data to this file
* @param options - S3 options for the upload
* @returns NetworkSink for streaming writes
*/
writer(options?: S3Options): NetworkSink;
/**
* Create a readable stream for downloading this file
* @returns ReadableStream for the file content
*/
stream(): ReadableStream<Uint8Array>;
/**
* Check if this file exists in the bucket
* @returns Promise that resolves with true if file exists
*/
exists(): Promise<boolean>;
/**
* Generate a presigned URL for temporary access to this file
* @param options - Presign options including expiration time
* @returns Presigned URL string
*/
presign(options?: S3FilePresignOptions): string;
/**
* Delete this file from the bucket
* @returns Promise that resolves when the file is deleted
*/
delete(): Promise<void>;
/**
* Get metadata and statistics for this file
* @returns Promise that resolves with file statistics
*/
stat(): Promise<S3Stats>;
}

Usage Examples:
// Create S3File instance
const file = s3.file("documents/report.pdf");
// Check if file exists
if (await file.exists()) {
// Get file metadata
const stats = await file.stat();
console.log(`File size: ${stats.size} bytes`);
console.log(`Last modified: ${stats.lastModified}`);
// Generate temporary access URL
const downloadUrl = file.presign({
expiresIn: 1800 // 30 minutes
});
// Stream file content
const stream = file.stream();
const response = new Response(stream);
const text = await response.text();
}
// Upload with streaming
const writer = file.writer({
type: "application/pdf",
acl: "private"
});
writer.write("PDF content here...");
await writer.end();
// File slicing for partial downloads
const firstMB = file.slice(0, 1024 * 1024);
const content = await firstMB.arrayBuffer();
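Because S3File extends Blob, the inherited Blob readers also work for whole-object downloads; a short sketch, assuming a text file exists at the given path:

const notes = s3.file("docs/notes.txt");
const text = await notes.text();         // read the whole object as a string
const bytes = await notes.arrayBuffer(); // or as binary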
Efficient streaming operations for large file uploads and downloads with built-in flow control.

/**
* Fast incremental writer for S3 uploads
* Provides streaming interface for efficient large file uploads
*/
interface NetworkSink {
/**
* Write a chunk of data to the upload stream
* @param chunk - The data to write
* @returns Number of bytes written
*/
write(chunk: string | ArrayBufferView | ArrayBuffer | SharedArrayBuffer): number;
/**
* Flush the internal buffer, committing data to S3
* @returns Number of bytes flushed or Promise resolving to bytes
*/
flush(): number | Promise<number>;
/**
* Complete the upload and close the stream
* @param error - Optional error to associate with the operation
* @returns Number of bytes written or Promise resolving to bytes
*/
end(error?: Error): number | Promise<number>;
/**
* Reference the stream to keep the process alive
*/
ref(): void;
/**
* Unreference the stream to allow process to exit
*/
unref(): void;
}

Usage Examples:
// Streaming upload of large file
const file = s3.file("large-data.csv");
const writer = file.writer({
type: "text/csv",
acl: "private"
});
// Write data in chunks
writer.write("header1,header2,header3\n");
for (let i = 0; i < 1000000; i++) {
writer.write(`value${i},data${i},info${i}\n`);
// Periodic flush for flow control
if (i % 10000 === 0) {
await writer.flush();
}
}
// Complete the upload
await writer.end();
// Download streaming
const downloadFile = s3.file("large-download.zip");
const stream = downloadFile.stream();
// Process stream data
const reader = stream.getReader();
let totalBytes = 0;
while (true) {
const { done, value } = await reader.read();
if (done) break;
totalBytes += value.length;
console.log(`Downloaded ${totalBytes} bytes`);
}
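Combining stream() with writer() gives a streaming copy between two paths without buffering the whole object in memory; a sketch with placeholder paths (Bun's ReadableStream supports async iteration):

const src = s3.file("exports/2024.csv");
const dst = s3.file("archive/2024.csv");
const sink = dst.writer({ type: "text/csv" });
for await (const chunk of src.stream()) {
  sink.write(chunk); // chunk is a Uint8Array
}
await sink.end(); // finalize the upload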