The Cluster Client provides high-availability OSS access through automatic failover, load balancing, and health checking across multiple OSS endpoints. It ensures resilient cloud storage operations by distributing requests and handling endpoint failures transparently.
// Factory signature for the high-availability cluster client.
// NOTE(review): shown as a call signature; examples below use
// `new OSS.ClusterClient(...)` — confirm against the ali-oss typings.
function ClusterClient(options: ClusterOptions): ClusterClient;
// Options controlling endpoint scheduling, retries and health checking.
interface ClusterOptions {
// One entry per OSS endpoint participating in the cluster.
clusters: ClusterConfig[];
// 'roundRobin' spreads requests across all endpoints by weight;
// 'masterSlave' prefers the first endpoint and fails over to the rest.
schedule?: 'roundRobin' | 'masterSlave';
// Maximum number of retries after a failed request.
retryMax?: number;
// Delay in milliseconds between retries.
retryDelay?: number;
// Request timeout; a two-element array is [connect, response] ms — TODO confirm.
timeout?: number | number[];
// Default headers merged into every request.
headers?: Record<string, string>;
// Presumably routes all requests to the master endpoint only — TODO confirm.
masterOnly?: boolean;
// Interval in ms between endpoint health probes.
heartbeatInterval?: number;
// Presumably skips the persisted endpoint-status file on startup — TODO confirm.
ignoreStatusFile?: boolean;
}
// Per-endpoint configuration (one entry in ClusterOptions.clusters).
interface ClusterConfig {
// OSS region of this endpoint, e.g. 'oss-cn-hangzhou'.
region: string;
accessKeyId: string;
accessKeySecret: string;
// Optional security token when using temporary STS credentials.
stsToken?: string;
bucket?: string;
// Explicit endpoint override; presumably derived from `region` when omitted.
endpoint?: string;
// Use the internal (intranet) endpoint for lower latency inside Alibaba Cloud.
internal?: boolean;
// Use HTTPS when true.
secure?: boolean;
// Per-endpoint timeout override; same shape as ClusterOptions.timeout.
timeout?: number | number[];
// Relative scheduling weight: higher weight receives more requests.
weight?: number;
}
const OSS = require('ali-oss');
// Create cluster client with multiple endpoints
const cluster = new OSS.ClusterClient({
clusters: [
{
region: 'oss-cn-hangzhou',
accessKeyId: 'your-access-key-id',
accessKeySecret: 'your-access-key-secret',
bucket: 'my-bucket',
weight: 10 // Higher weight = more requests
},
{
region: 'oss-cn-beijing',
accessKeyId: 'your-access-key-id',
accessKeySecret: 'your-access-key-secret',
bucket: 'my-bucket-backup',
weight: 5 // Lower weight = fewer requests
},
{
region: 'oss-cn-shenzhen',
accessKeyId: 'your-access-key-id',
accessKeySecret: 'your-access-key-secret',
bucket: 'my-bucket-south',
weight: 3
}
],
schedule: 'roundRobin', // or 'masterSlave'
retryMax: 3,
retryDelay: 1000
});interface RoundRobinConfig {
schedule: 'roundRobin';
clusters: ClusterConfig[];
}Characteristics:
interface MasterSlaveConfig {
schedule: 'masterSlave';
clusters: ClusterConfig[];
}Characteristics:
The Cluster Client supports all standard OSS operations with automatic endpoint selection and failover:
// All object operations work transparently
async function put(name: string, file: ObjectSource, options?: PutObjectOptions): Promise<PutObjectResult>;
async function get(name: string, file?: string | WriteStream, options?: GetObjectOptions): Promise<GetObjectResult>;
// NOTE(review): 'delete' is a reserved word; this documents the cluster.delete(...) method.
async function delete(name: string, options?: RequestOptions): Promise<DeleteObjectResult>;
async function list(query?: ListObjectsQuery, options?: RequestOptions): Promise<ListObjectsResult>;
async function head(name: string, options?: HeadObjectOptions): Promise<HeadObjectResult>;
async function copy(name: string, sourceName: string, options?: CopyObjectOptions): Promise<CopyObjectResult>;
// All bucket operations work transparently
async function listBuckets(query?: ListBucketsQuery, options?: RequestOptions): Promise<ListBucketsResult>;
async function getBucketInfo(name: string, options?: RequestOptions): Promise<BucketInfoResult>;
async function putBucketACL(name: string, acl: string, options?: RequestOptions): Promise<PutBucketACLResult>;
async function getBucketACL(name: string, options?: RequestOptions): Promise<GetBucketACLResult>;
// All multipart operations work transparently
async function multipartUpload(name: string, file: MultipartSource, options?: MultipartUploadOptions): Promise<MultipartUploadResult>;
async function initMultipartUpload(name: string, options?: InitMultipartUploadOptions): Promise<InitMultipartUploadResult>;
async function uploadPart(name: string, uploadId: string, partNumber: number, file: PartSource, start?: number, end?: number, options?: RequestOptions): Promise<UploadPartResult>;
async function completeMultipartUpload(name: string, uploadId: string, parts: CompletedPart[], options?: CompleteMultipartUploadOptions): Promise<CompleteMultipartUploadResult>;
// Multi-region cluster for global access
// Multi-region cluster: weights bias traffic toward the closest/fastest region.
const globalCluster = new OSS.ClusterClient({
  clusters: [
    {
      region: 'oss-cn-hangzhou',
      accessKeyId: 'your-access-key-id',
      accessKeySecret: 'your-access-key-secret',
      bucket: 'asia-bucket',
      weight: 10
    },
    {
      region: 'oss-us-west-1',
      accessKeyId: 'your-access-key-id',
      accessKeySecret: 'your-access-key-secret',
      bucket: 'us-bucket',
      weight: 8
    },
    {
      region: 'oss-eu-central-1',
      accessKeyId: 'your-access-key-id',
      accessKeySecret: 'your-access-key-secret',
      bucket: 'eu-bucket',
      weight: 6
    }
  ],
  schedule: 'roundRobin',
  retryMax: 2,
  retryDelay: 500
});
// Primary-backup configuration
// Primary-backup pair: the master handles traffic; the backup takes over on failure.
const masterSlaveCluster = new OSS.ClusterClient({
  clusters: [
    {
      // Primary endpoint
      region: 'oss-cn-hangzhou',
      accessKeyId: 'your-access-key-id',
      accessKeySecret: 'your-access-key-secret',
      bucket: 'primary-bucket',
      internal: true, // Use internal endpoint for better performance
      weight: 100
    },
    {
      // Backup endpoint
      region: 'oss-cn-beijing',
      accessKeyId: 'your-access-key-id',
      accessKeySecret: 'your-access-key-secret',
      bucket: 'backup-bucket',
      weight: 1 // Only used when primary fails
    }
  ],
  schedule: 'masterSlave',
  retryMax: 3,
  retryDelay: 2000
});
// Cluster with STS tokens
const stsCluster = new OSS.ClusterClient({
clusters: [
{
region: 'oss-cn-hangzhou',
accessKeyId: 'temp-access-key-1',
accessKeySecret: 'temp-access-secret-1',
stsToken: 'sts-token-1',
bucket: 'secure-bucket-1'
},
{
region: 'oss-cn-beijing',
accessKeyId: 'temp-access-key-2',
accessKeySecret: 'temp-access-secret-2',
stsToken: 'sts-token-2',
bucket: 'secure-bucket-2'
}
],
schedule: 'roundRobin'
});The cluster client automatically monitors endpoint health through:
// Failover is automatic and transparent
try {
  // This request will automatically failover if needed
  const result = await cluster.put('important-file.txt', fileContent);
  console.log('Upload successful');
} catch (error) {
  // Only throws if ALL endpoints fail
  console.error('All endpoints failed:', error);
}
// Health checking methods (conceptual - actual implementation may vary)
// Returns an aggregate health snapshot for every configured endpoint.
async function checkClusterHealth(): Promise<ClusterHealthReport>;
interface ClusterHealthReport {
totalEndpoints: number;
healthyEndpoints: number;
unhealthyEndpoints: number;
// Per-endpoint detail rows.
endpoints: EndpointHealth[];
}
interface EndpointHealth {
region: string;
status: 'healthy' | 'unhealthy' | 'unknown';
// Timestamp of the most recent health probe.
lastCheck: Date;
// Presumably milliseconds — TODO confirm units.
responseTime?: number;
// Presumably the fraction of recent requests that failed — TODO confirm window.
errorRate?: number;
}
// Upload with automatic failover
// Uploads `content` under `filename` through the cluster with default
// timeouts and storage-class header; caller options/headers take precedence.
// Rethrows the error after logging if every endpoint fails.
// FIX: the template literals were garbled as `$(unknown)`; restored `${filename}`.
async function reliableUpload(filename, content, options = {}) {
  const uploadOptions = {
    ...options,
    timeout: [5000, 30000], // 5s connect, 30s response
    headers: {
      'x-oss-storage-class': 'Standard',
      ...options.headers // caller-provided headers override the default
    }
  };
  try {
    const result = await cluster.put(filename, content, uploadOptions);
    console.log(`Uploaded ${filename} successfully`);
    return result;
  } catch (error) {
    console.error(`Failed to upload ${filename}:`, error.message);
    throw error;
  }
}
// Usage
await reliableUpload('report.pdf', pdfBuffer, {
  meta: { author: 'John Doe', version: '1.0' }
});
// Process files with automatic retry across endpoints
// Downloads, transforms and re-uploads each file under `processed/`; per-file
// failures are recorded in the result list instead of aborting the batch.
// FIX: the template literals were garbled as `$(unknown)`; restored `${filename}`.
async function processFiles(filenames) {
  const results = [];
  for (const filename of filenames) {
    try {
      // Download with automatic failover
      const object = await cluster.get(filename);
      // Process the file
      const processedContent = processFile(object.content);
      // Upload processed version with failover
      const uploadResult = await cluster.put(
        `processed/${filename}`,
        processedContent,
        { meta: { processed: 'true', timestamp: new Date().toISOString() } }
      );
      results.push({ filename, status: 'success', url: uploadResult.url });
    } catch (error) {
      console.error(`Failed to process ${filename}:`, error);
      results.push({ filename, status: 'failed', error: error.message });
    }
  }
  return results;
}
// Large file upload with cluster resilience
// Multipart upload with progress checkpointing so an interrupted transfer
// can be resumed later from the saved checkpoint.
async function clusterMultipartUpload(filename, filePath) {
  const options = {
    parallel: 3,           // upload up to 3 parts concurrently
    partSize: 1024 * 1024, // 1MB parts
    progress: (percentage, checkpoint) => {
      console.log(`Upload progress: ${Math.round(percentage * 100)}%`);
      // Save checkpoint for resume capability
      saveCheckpoint(filename, checkpoint);
    },
    timeout: [10000, 60000], // Longer timeouts for large uploads
    retryMax: 2 // Retry failed parts
  };
  return cluster.multipartUpload(filename, filePath, options);
}
// Adjust weights based on performance characteristics
// Weighted cluster: faster (internal) endpoints get proportionally more traffic.
const optimizedCluster = new OSS.ClusterClient({
  clusters: [
    {
      region: 'oss-cn-hangzhou',
      accessKeyId: 'your-key',
      accessKeySecret: 'your-secret',
      bucket: 'fast-endpoint',
      internal: true, // Internal network for better performance
      weight: 15 // Higher weight for faster endpoint
    },
    {
      region: 'oss-cn-beijing',
      accessKeyId: 'your-key',
      accessKeySecret: 'your-secret',
      bucket: 'standard-endpoint',
      weight: 10 // Standard weight
    },
    {
      region: 'oss-cn-shenzhen',
      accessKeyId: 'your-key',
      accessKeySecret: 'your-secret',
      bucket: 'backup-endpoint',
      weight: 5 // Lower weight for backup
    }
  ],
  schedule: 'roundRobin'
});
const http = require('http');
const https = require('https');
// Shared agents for connection pooling
const httpAgent = new http.Agent({
  keepAlive: true,
  maxSockets: 20,
  maxFreeSockets: 10
});
const httpsAgent = new https.Agent({
  keepAlive: true,
  maxSockets: 20,
  maxFreeSockets: 10
});
// NOTE(review): redeclares `cluster` from the earlier snippet; assumes a
// `clusters` config array is in scope — these snippets are standalone examples.
const cluster = new OSS.ClusterClient({
  clusters: clusters.map(config => ({
    ...config,
    agent: httpAgent,       // shared keep-alive HTTP agent
    httpsAgent: httpsAgent, // shared keep-alive HTTPS agent
    timeout: [5000, 30000]  // Optimized timeouts
  })),
  schedule: 'roundRobin'
});
// Runs `operation` up to 3 times, retrying with exponential backoff only
// when isRetryableError() classifies the failure as transient.
async function robustClusterOperation(operation) {
  const maxAttempts = 3;
  let attempt = 0;
  while (attempt < maxAttempts) {
    try {
      const result = await operation();
      return result;
    } catch (error) {
      attempt++;
      // Log error details
      console.error(`Attempt ${attempt} failed:`, {
        error: error.message,
        code: error.code,
        status: error.status,
        endpoint: error.hostId || 'unknown'
      });
      // Check if error is retryable
      if (isRetryableError(error) && attempt < maxAttempts) {
        const delay = Math.pow(2, attempt) * 1000; // Exponential backoff: 2s, 4s
        console.log(`Retrying in ${delay}ms...`);
        await sleep(delay);
        continue;
      }
      // Non-retryable, or retries exhausted: surface the error to the caller.
      throw error;
    }
  }
}
// Classifies an error as transient (worth retrying): either a known
// retryable OSS error code or any 5xx HTTP status.
function isRetryableError(error) {
  const transientCodes = new Set([
    'RequestTimeout',
    'ConnectionTimeout',
    'ServiceUnavailable',
    'InternalError',
    'SlowDown'
  ]);
  if (transientCodes.has(error.code)) {
    return true;
  }
  const status = error.status;
  return status >= 500 && status < 600;
}
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}class ClusterMonitor {
constructor(cluster) {
this.cluster = cluster;
this.metrics = {
totalRequests: 0,
successfulRequests: 0,
failedRequests: 0,
endpointStats: new Map()
};
}
recordRequest(endpoint, success, responseTime, error = null) {
this.metrics.totalRequests++;
if (success) {
this.metrics.successfulRequests++;
} else {
this.metrics.failedRequests++;
}
// Track per-endpoint stats
if (!this.metrics.endpointStats.has(endpoint)) {
this.metrics.endpointStats.set(endpoint, {
requests: 0,
successes: 0,
failures: 0,
totalResponseTime: 0,
errors: []
});
}
const stats = this.metrics.endpointStats.get(endpoint);
stats.requests++;
stats.totalResponseTime += responseTime || 0;
if (success) {
stats.successes++;
} else {
stats.failures++;
if (error) {
stats.errors.push({
timestamp: new Date(),
error: error.message,
code: error.code
});
// Keep only recent errors
if (stats.errors.length > 10) {
stats.errors.shift();
}
}
}
}
getHealthReport() {
const report = {
overall: {
totalRequests: this.metrics.totalRequests,
successRate: this.metrics.successfulRequests / this.metrics.totalRequests,
failureRate: this.metrics.failedRequests / this.metrics.totalRequests
},
endpoints: []
};
for (const [endpoint, stats] of this.metrics.endpointStats) {
report.endpoints.push({
endpoint,
requests: stats.requests,
successRate: stats.successes / stats.requests,
averageResponseTime: stats.totalResponseTime / stats.requests,
recentErrors: stats.errors.slice(-3)
});
}
return report;
}
}close(): void;Usage:
// Cleanup cluster resources when done
cluster.close();The cluster client includes automatic health checking mechanisms:
The cluster client maintains availability state through periodic heartbeat probes of each endpoint (configured via `heartbeatInterval`) and a persisted endpoint-status record (bypassed when `ignoreStatusFile` is set).