Microsoft Azure client library for Blob Storage - Azure Blob Storage is Microsoft's object storage solution for the cloud, optimized for storing massive amounts of unstructured data such as text or binary data.
—
This documentation covers the comprehensive configuration options available in the Azure Storage Blob Java SDK for fine-tuning client behavior, performance, and operation parameters.
Configuration for parallel blob upload operations.
import com.azure.storage.blob.options.BlobParallelUploadOptions;
import com.azure.storage.blob.models.*;
import com.azure.core.util.BinaryData;
import reactor.core.publisher.Flux;
import java.nio.ByteBuffer;
// Basic parallel upload configuration.
// Fix: Content-Encoding is reserved for transfer encodings such as gzip —
// "utf-8" is not a valid value there. A character set belongs in Content-Type.
ParallelTransferOptions transferOptions = new ParallelTransferOptions()
    .setBlockSizeLong(4 * 1024 * 1024L) // 4MB blocks
    .setMaxConcurrency(8)
    .setProgressListener(bytesTransferred ->
        System.out.println("Uploaded: " + bytesTransferred + " bytes"));
BlobParallelUploadOptions uploadOptions = new BlobParallelUploadOptions(
    BinaryData.fromString("Large content for parallel upload"))
    .setParallelTransferOptions(transferOptions)
    .setHeaders(new BlobHttpHeaders()
        .setContentType("text/plain; charset=utf-8") // charset lives in Content-Type
        .setCacheControl("no-cache"))
    .setMetadata(Map.of(
        "upload-method", "parallel",
        "timestamp", OffsetDateTime.now().toString()))
    .setTags(Map.of(
        "environment", "production",
        "team", "backend"))
    .setTier(AccessTier.HOT)
    .setRequestConditions(new BlobRequestConditions()
        .setIfNoneMatch("*")); // fail with 409 if the blob already exists
// Upload with comprehensive options
Response<BlockBlobItem> uploadResponse = blobClient.uploadWithResponse(
    uploadOptions,
    Duration.ofMinutes(10),
    Context.NONE
);
// Upload from Flux (reactive streams)
// NOTE(review): getBytes() uses the platform-default charset; prefer
// getBytes(StandardCharsets.UTF_8) if this example is copied into production code.
Flux<ByteBuffer> dataFlux = Flux.range(1, 1000)
    .map(i -> ByteBuffer.wrap(("Data chunk " + i + "\n").getBytes()));
BlobParallelUploadOptions fluxOptions = new BlobParallelUploadOptions(dataFlux)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(1024 * 1024L) // 1MB blocks for streaming
        .setMaxConcurrency(4))
    .setHeaders(new BlobHttpHeaders().setContentType("text/plain"));
// Use with async client for reactive upload.
// NOTE(review): confirm how the async client is obtained in the SDK version in
// use — typically it is built from BlobClientBuilder.buildAsyncClient().
BlobAsyncClient asyncClient = blobClient.getAsyncClient();
asyncClient.uploadWithResponse(fluxOptions)
    .doOnSuccess(response -> System.out.println("Reactive upload completed"))
    .subscribe();
Configuration for uploading blobs from local files.
import com.azure.storage.blob.options.BlobUploadFromFileOptions;
// Configure file upload with all options.
// Fix: the file size is computed once up front — the original called
// getTotalFileSize() inside the progress listener, performing filesystem I/O
// on every progress callback during the transfer.
final long datasetSize = getTotalFileSize("large-dataset.csv");
BlobUploadFromFileOptions fileUploadOptions = new BlobUploadFromFileOptions("large-dataset.csv")
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(8 * 1024 * 1024L) // 8MB blocks for large files
        .setMaxConcurrency(6)
        .setMaxSingleUploadSizeLong(256 * 1024 * 1024L) // 256MB single upload threshold
        .setProgressListener(bytesTransferred -> {
            // Guard divide-by-zero: getTotalFileSize returns 0 when the lookup fails.
            double percentage = datasetSize > 0 ? (double) bytesTransferred / datasetSize * 100 : 0.0;
            System.out.printf("Upload progress: %.1f%% (%d/%d bytes)%n",
                percentage, bytesTransferred, datasetSize);
        }))
    .setHeaders(new BlobHttpHeaders()
        .setContentType("text/csv")
        .setContentDisposition("attachment; filename=dataset.csv")
        .setCacheControl("private, max-age=0"))
    .setMetadata(Map.of(
        "source-file", "large-dataset.csv",
        "file-size", String.valueOf(datasetSize),
        "upload-date", OffsetDateTime.now().toString(),
        "checksum", calculateFileChecksum("large-dataset.csv")))
    .setTags(Map.of(
        "data-type", "csv",
        "department", "analytics",
        "retention-years", "7"))
    .setTier(AccessTier.HOT)
    .setRequestConditions(new BlobRequestConditions()
        .setIfNoneMatch("*") // Only upload if blob doesn't exist
        .setTagsConditions("\"data-type\" <> 'csv'")); // Prevent overwriting CSV files
// Execute upload with comprehensive error handling
try {
    Response<BlockBlobItem> response = blobClient.uploadFromFileWithResponse(
        fileUploadOptions,
        Duration.ofMinutes(30),
        Context.NONE
    );
    BlockBlobItem result = response.getValue();
    System.out.println("File upload completed:");
    System.out.println("ETag: " + result.getETag());
    System.out.println("Version ID: " + result.getVersionId());
    System.out.println("Upload time: " + result.getLastModified());
} catch (BlobStorageException ex) {
    handleUploadError(ex); // storage-specific diagnostics (helper defined elsewhere in this guide)
} catch (Exception ex) {
    System.err.println("Unexpected error during file upload: " + ex.getMessage());
}
// Helper methods
/**
 * Returns the size of the given file in bytes, or {@code 0} when it cannot
 * be determined. Best-effort by design: callers use the result only for
 * progress reporting and metadata, so failure must not abort the upload.
 *
 * @param filePath path to the local file
 * @return file size in bytes, or 0 on any read failure
 */
private long getTotalFileSize(String filePath) {
    try {
        return Files.size(Paths.get(filePath));
    } catch (IOException | InvalidPathException ex) {
        // Deliberately swallowed: a missing/unreadable file reports size 0.
        return 0;
    }
}
/**
 * Computes the MD5 digest of a file and returns it Base64-encoded, or
 * {@code "unknown"} when the file cannot be read. MD5 is used here purely as
 * a transfer-integrity checksum (matching the blob Content-MD5 convention),
 * not for any security purpose.
 *
 * @param filePath path to the local file
 * @return Base64-encoded MD5 digest, or "unknown" on failure
 */
private String calculateFileChecksum(String filePath) {
    try {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        try (InputStream fis = Files.newInputStream(Paths.get(filePath))) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = fis.read(buffer)) != -1) {
                md5.update(buffer, 0, bytesRead);
            }
        }
        return Base64.getEncoder().encodeToString(md5.digest());
    } catch (IOException | NoSuchAlgorithmException ex) {
        // Best-effort: the metadata simply records "unknown" when hashing fails.
        return "unknown";
    }
}
Configuration for downloading blobs to local files.
import com.azure.storage.blob.options.BlobDownloadToFileOptions;
import com.azure.storage.blob.models.DownloadRetryOptions;
import java.nio.file.StandardOpenOption;
// Configure comprehensive file download
BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions("downloaded-file.dat")
.setRange(new BlobRange(0)) // Download from beginning (entire file)
.setParallelTransferOptions(new ParallelTransferOptions()
.setBlockSizeLong(4 * 1024 * 1024L) // 4MB download blocks
.setMaxConcurrency(8)
.setProgressListener(bytesTransferred -> {
System.out.println("Downloaded: " + bytesTransferred + " bytes");
}))
// Retries apply to the body stream of each ranged GET.
.setDownloadRetryOptions(new DownloadRetryOptions()
.setMaxRetryRequests(5))
// NOTE(review): If-Modified-Since makes the call fail when the blob has NOT
// changed in the last day — confirm that conditional is really intended here.
.setRequestConditions(new BlobRequestConditions()
.setIfModifiedSince(OffsetDateTime.now().minusDays(1)))
.setRetrieveContentMd5(true) // Validate content integrity
// CREATE + WRITE + TRUNCATE_EXISTING: create the file if absent, overwrite otherwise.
.setOpenOptions(Set.of(
StandardOpenOption.CREATE,
StandardOpenOption.WRITE,
StandardOpenOption.TRUNCATE_EXISTING));
// Execute download with monitoring
try {
Response<BlobProperties> downloadResponse = blobClient.downloadToFileWithResponse(
downloadOptions,
Duration.ofMinutes(15),
Context.NONE
);
BlobProperties properties = downloadResponse.getValue();
System.out.println("Download completed:");
System.out.println("File size: " + properties.getBlobSize() + " bytes");
System.out.println("Content type: " + properties.getContentType());
System.out.println("Last modified: " + properties.getLastModified());
// Verify content integrity if MD5 was requested
byte[] contentMd5 = properties.getContentMd5();
if (contentMd5 != null) {
System.out.println("Content MD5: " + Base64.getEncoder().encodeToString(contentMd5));
// Helper defined elsewhere in this guide.
verifyDownloadedFileIntegrity("downloaded-file.dat", contentMd5);
}
} catch (BlobStorageException ex) {
// Helper defined elsewhere in this guide.
handleDownloadError(ex);
}
// Partial (ranged) download: fetch 5MB of data starting at the 1MB offset.
BlobRange rangeToFetch = new BlobRange(1024 * 1024, 5 * 1024 * 1024L);
BlobDownloadToFileOptions rangedOptions = new BlobDownloadToFileOptions("partial-download.dat")
    .setRange(rangeToFetch)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setMaxConcurrency(4)
        .setBlockSizeLong(512 * 1024L)); // smaller blocks suit a small ranged read
Response<BlobProperties> rangedResponse = blobClient.downloadToFileWithResponse(
    rangedOptions,
    Duration.ofMinutes(5),
    Context.NONE
);
System.out.println("Partial download completed: " +
    Files.size(Paths.get("partial-download.dat")) + " bytes");
Configuration for in-memory blob downloads.
import com.azure.storage.blob.options.BlobDownloadOptions;
// Configure in-memory download with range
// NOTE(review): verify the options type name against the SDK version in use —
// downloadContentWithResponse has historically taken (DownloadRetryOptions,
// BlobRequestConditions, Duration, Context) rather than an options bag.
BlobDownloadOptions downloadOptions = new BlobDownloadOptions()
.setRange(new BlobRange(0, 1024 * 1024L)) // First 1MB
// If-Unmodified-Since(now): fail if the blob changes after this instant.
.setRequestConditions(new BlobRequestConditions()
.setIfUnmodifiedSince(OffsetDateTime.now()));
// Download to memory
BlobDownloadContentResponse contentResponse = blobClient.downloadContentWithResponse(
downloadOptions,
Duration.ofMinutes(2),
Context.NONE
);
BinaryData content = contentResponse.getValue();
System.out.println("Downloaded " + content.getLength() + " bytes to memory");
// Process content based on type
String contentType = contentResponse.getHeaders().getValue("Content-Type");
if (contentType != null && contentType.startsWith("text/")) {
// BinaryData.toString() decodes the bytes as UTF-8.
String textContent = content.toString();
System.out.println("Text content preview: " +
textContent.substring(0, Math.min(100, textContent.length())));
} else {
System.out.println("Binary content downloaded");
}
Configuration for creating blob containers.
import com.azure.storage.blob.options.BlobContainerCreateOptions;
import com.azure.storage.blob.models.BlobContainerEncryptionScope;
// Configure container creation with all options
BlobContainerCreateOptions createOptions = new BlobContainerCreateOptions()
    .setMetadata(Map.of(
        "environment", "production",
        "team", "backend",
        "created-by", "deployment-script",
        "cost-center", "12345",
        "retention-policy", "7-years"))
    .setPublicAccessType(PublicAccessType.BLOB) // Allow public blob access
    .setEncryptionScope(new BlobContainerEncryptionScope()
        .setDefaultEncryptionScope("containerencryption")
        .setPreventEncryptionScopeOverride(true));
// Create container with comprehensive configuration
try {
    Response<Void> createResponse = containerClient.createWithResponse(
        createOptions,
        Duration.ofMinutes(2),
        Context.NONE
    );
    System.out.println("Container created successfully:");
    System.out.println("Status code: " + createResponse.getStatusCode());
    System.out.println("Request ID: " + createResponse.getHeaders().getValue("x-ms-request-id"));
    // Verify configuration
    BlobContainerProperties properties = containerClient.getProperties();
    System.out.println("Public access: " + properties.getPublicAccess());
    System.out.println("Metadata count: " + properties.getMetadata().size());
    System.out.println("Default encryption scope: " + properties.getDefaultEncryptionScope());
} catch (BlobStorageException ex) {
    // Fix: BlobErrorCode is an ExpandableStringEnum, not a Java enum — compare
    // with equals() rather than ==, which relies on internal instance caching.
    if (BlobErrorCode.CONTAINER_ALREADY_EXISTS.equals(ex.getErrorCode())) {
        System.out.println("Container already exists");
    } else {
        throw ex;
    }
}
// Create container with minimal public access
Map<String, String> secureMetadata = Map.of("security-level", "high");
BlobContainerCreateOptions secureOptions = new BlobContainerCreateOptions()
    .setPublicAccessType(null) // null == private: no anonymous access
    .setMetadata(secureMetadata);
containerClient.createWithResponse(secureOptions, Duration.ofMinutes(1), Context.NONE);
Configuration for listing blobs in containers.
import com.azure.storage.blob.models.ListBlobsOptions;
import com.azure.storage.blob.models.BlobListDetails;
// Configure comprehensive blob listing
ListBlobsOptions listOptions = new ListBlobsOptions()
.setPrefix("logs/2023/") // Filter by prefix
.setMaxResultsPerPage(100) // Pagination
// NOTE(review): deleted blobs / versions / snapshots only appear when the
// matching account features (soft delete, versioning) are enabled — confirm.
.setDetails(new BlobListDetails()
.setRetrieveDeletedBlobs(true) // Include soft-deleted blobs
.setRetrieveMetadata(true) // Include blob metadata
.setRetrieveTags(true) // Include blob tags
.setRetrieveSnapshots(true) // Include blob snapshots
.setRetrieveVersions(true) // Include blob versions
.setRetrieveUncommittedBlobs(false) // Exclude uncommitted blocks
.setRetrieveCopy(true) // Include copy information
.setRetrieveImmutabilityPolicy(true) // Include immutability policies
.setRetrieveLegalHolds(true)); // Include legal holds
// List blobs with comprehensive information (lazily pages through results)
PagedIterable<BlobItem> blobs = containerClient.listBlobs(listOptions, Duration.ofMinutes(2));
for (BlobItem blob : blobs) {
System.out.println("Blob: " + blob.getName());
System.out.println("Size: " + blob.getProperties().getContentLength() + " bytes");
System.out.println("Last modified: " + blob.getProperties().getLastModified());
System.out.println("Is deleted: " + blob.isDeleted());
System.out.println("Is current version: " + blob.isCurrentVersion());
// Display metadata if retrieved (null when setRetrieveMetadata was false)
if (blob.getMetadata() != null) {
System.out.println("Metadata:");
blob.getMetadata().forEach((key, value) ->
System.out.println(" " + key + ": " + value));
}
// Display tags if retrieved (null when setRetrieveTags was false)
if (blob.getTags() != null) {
System.out.println("Tags:");
blob.getTags().forEach((key, value) ->
System.out.println(" " + key + ": " + value));
}
System.out.println("---");
}
// Hierarchical listing: treat "/" as a path separator to get a folder-like view.
ListBlobsOptions folderListing = new ListBlobsOptions()
    .setDetails(new BlobListDetails().setRetrieveMetadata(true))
    .setPrefix("documents/");
PagedIterable<BlobItem> entries = containerClient.listBlobsByHierarchy(
    "/", // delimiter
    folderListing,
    Duration.ofMinutes(1)
);
for (BlobItem entry : entries) {
    // Prefix items are the "virtual directories"; everything else is a blob.
    System.out.println((entry.isPrefix() ? "Directory: " : "File: ") + entry.getName());
}
Configuration for finding blobs by tags.
import com.azure.storage.blob.options.FindBlobsOptions;
// Tag-based blob search across the whole storage account.
String tagQuery = "\"environment\" = 'production' AND \"team\" = 'backend' AND \"retention-years\" >= '5'";
FindBlobsOptions tagSearch = new FindBlobsOptions(tagQuery)
    .setMaxResultsPerPage(50); // Limit results per page
// Run the query; results are paged lazily.
PagedIterable<TaggedBlobItem> matches = serviceClient.findBlobsByTags(
    tagSearch,
    Duration.ofMinutes(2)
);
System.out.println("Blobs matching tag query: " + tagQuery);
for (TaggedBlobItem match : matches) {
    System.out.println("Blob: " + match.getName());
    System.out.println("Container: " + match.getContainerName());
    System.out.println("Tags: " + match.getTags().toMap());
    System.out.println("---");
}
// A gallery of tag-query shapes the service accepts. Note: all tag values are
// strings, so ">=" comparisons are lexicographic.
String[] complexQueries = {
    "\"cost-center\" = '12345'", // Simple equality
    "\"retention-years\" >= '7'", // Numeric comparison
    "\"environment\" = 'production' OR \"environment\" = 'staging'", // OR condition
    "\"department\" = 'finance' AND \"classification\" <> 'public'", // AND with not equal
    "\"created-date\" >= '2023-01-01' AND \"created-date\" < '2024-01-01'" // Date range
};
for (String query : complexQueries) {
    System.out.println("Query: " + query);
    FindBlobsOptions perQueryOptions = new FindBlobsOptions(query).setMaxResultsPerPage(10);
    PagedIterable<TaggedBlobItem> results = serviceClient.findBlobsByTags(
        perQueryOptions,
        Duration.ofSeconds(30)
    );
    // Walk at most six results so the demo output stays short.
    int count = 0;
    for (TaggedBlobItem item : results) {
        count++;
        if (count > 5) {
            break;
        }
    }
    System.out.println("Found " + count + "+ matching blobs");
    System.out.println();
}
Configuration for optimizing parallel data transfer.
import com.azure.storage.blob.models.ParallelTransferOptions;
import com.azure.storage.blob.ProgressReceiver;
// Create performance-optimized transfer options
/**
 * Builds {@code ParallelTransferOptions} tuned to the payload size and
 * network quality.
 *
 * @param fileSize         payload size in bytes; drives block size and concurrency
 * @param networkCondition "slow", "fast", or "satellite"; any other value
 *                         (including null) applies no network adjustment
 * @param showProgress     when true, attaches a {@code DetailedProgressReceiver}
 * @return tuned transfer options, never null
 */
public ParallelTransferOptions createOptimalTransferOptions(
        long fileSize,
        String networkCondition,
        boolean showProgress) {
    ParallelTransferOptions options = new ParallelTransferOptions();
    // Size-based tiers: larger payloads get larger blocks and more concurrency.
    if (fileSize < 16 * 1024 * 1024) { // < 16MB
        options.setBlockSizeLong(1 * 1024 * 1024L); // 1MB blocks
        options.setMaxConcurrency(2);
        options.setMaxSingleUploadSizeLong(fileSize); // Single upload for small files
    } else if (fileSize < 256 * 1024 * 1024) { // < 256MB
        options.setBlockSizeLong(4 * 1024 * 1024L); // 4MB blocks
        options.setMaxConcurrency(4);
        options.setMaxSingleUploadSizeLong(32 * 1024 * 1024L); // 32MB threshold
    } else if (fileSize < 1024 * 1024 * 1024) { // < 1GB
        options.setBlockSizeLong(8 * 1024 * 1024L); // 8MB blocks
        options.setMaxConcurrency(8);
        options.setMaxSingleUploadSizeLong(64 * 1024 * 1024L); // 64MB threshold
    } else { // >= 1GB
        options.setBlockSizeLong(16 * 1024 * 1024L); // 16MB blocks
        options.setMaxConcurrency(16);
        options.setMaxSingleUploadSizeLong(128 * 1024 * 1024L); // 128MB threshold
    }
    // Fixes: Locale.ROOT keeps lowercasing stable across default locales
    // (e.g. Turkish dotless i), and a null condition now means "no
    // adjustment" instead of throwing a NullPointerException.
    String condition = networkCondition == null ? "" : networkCondition.toLowerCase(Locale.ROOT);
    switch (condition) {
        case "slow":
            // Halve concurrency and block size, but never below 1 thread / 1MB.
            options.setMaxConcurrency(Math.max(1, options.getMaxConcurrency() / 2));
            options.setBlockSizeLong(Math.max(1024 * 1024L, options.getBlockSizeLong() / 2));
            break;
        case "fast":
            options.setMaxConcurrency(Math.min(32, options.getMaxConcurrency() * 2));
            break;
        case "satellite":
            options.setMaxConcurrency(1); // High latency, low concurrency
            options.setBlockSizeLong(32 * 1024 * 1024L); // Larger blocks
            break;
        default:
            // Unknown condition: keep the size-based tuning unchanged.
            break;
    }
    // Add progress tracking if requested
    if (showProgress) {
        options.setProgressListener(new DetailedProgressReceiver(fileSize));
    }
    return options;
}
// Custom progress receiver with detailed, throttled console reporting.
class DetailedProgressReceiver implements ProgressReceiver {
    private final long totalSize;   // expected total bytes; <= 0 means unknown
    private long lastReported = 0;  // byte count at the last console report
    private final long startTime;   // wall-clock millis, for throughput

    public DetailedProgressReceiver(long totalSize) {
        this.totalSize = totalSize;
        this.startTime = System.currentTimeMillis();
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Report every 5% or every 10MB, whichever comes first (the smaller
        // interval). Fix: clamp the interval to at least 1 byte so tiny or
        // unknown totals (totalSize / 20 == 0) don't log on every callback.
        long reportInterval = Math.max(1, Math.min(totalSize / 20, 10 * 1024 * 1024L));
        if (bytesTransferred - lastReported >= reportInterval || bytesTransferred == totalSize) {
            // Fix: guard divide-by-zero when the total size is unknown/zero.
            double percentage = totalSize > 0 ? (double) bytesTransferred / totalSize * 100 : 0.0;
            long elapsedMs = System.currentTimeMillis() - startTime;
            double rate = elapsedMs > 0 ? (bytesTransferred / 1024.0 / 1024.0) / (elapsedMs / 1000.0) : 0;
            System.out.printf("Transfer progress: %.1f%% (%d/%d bytes) - %.2f MB/s%n",
                percentage, bytesTransferred, totalSize, rate);
            lastReported = bytesTransferred;
        }
    }
}
// Usage examples
long payloadBytes = 500 * 1024 * 1024L; // 500MB file
// Optimized for good network connection
ParallelTransferOptions fastOptions = createOptimalTransferOptions(payloadBytes, "fast", true);
BlobUploadFromFileOptions fastUpload = new BlobUploadFromFileOptions("large-file.dat")
    .setParallelTransferOptions(fastOptions);
// Optimized for slow/unreliable connection
ParallelTransferOptions slowOptions = createOptimalTransferOptions(payloadBytes, "slow", true);
BlobUploadFromFileOptions slowUpload = new BlobUploadFromFileOptions("large-file.dat")
    .setParallelTransferOptions(slowOptions);
// Optimized for satellite/high-latency connection
ParallelTransferOptions satelliteOptions = createOptimalTransferOptions(payloadBytes, "satellite", true);
BlobUploadFromFileOptions satelliteUpload = new BlobUploadFromFileOptions("large-file.dat")
    .setParallelTransferOptions(satelliteOptions);
Configuration for download retry behavior.
import com.azure.storage.blob.models.DownloadRetryOptions;
// Robust download retry configuration: up to 5 re-requests on a broken body stream.
DownloadRetryOptions downloadRetries = new DownloadRetryOptions().setMaxRetryRequests(5);
// Apply the retry options to a file download.
BlobDownloadToFileOptions reliableDownload = new BlobDownloadToFileOptions("reliable-download.dat")
    .setDownloadRetryOptions(downloadRetries)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setMaxConcurrency(4)
        .setBlockSizeLong(2 * 1024 * 1024L)); // smaller blocks limit re-downloaded data
// Run the download; the generous timeout leaves room for retries.
try {
    blobClient.downloadToFileWithResponse(
        reliableDownload,
        Duration.ofMinutes(20),
        Context.NONE
    );
    System.out.println("Download completed with retries if needed");
} catch (Exception ex) {
    System.err.println("Download failed after retries: " + ex.getMessage());
}
// Configure for different reliability needs
/**
 * Maps a reliability level to download retry options.
 *
 * @param reliabilityLevel "high", "medium", "low", or "none" (case-insensitive);
 *                         any other value — including null — falls back to 3 retries
 * @return retry options with the matching max retry count, never null
 */
public DownloadRetryOptions createRetryOptions(String reliabilityLevel) {
    // Fixes: Locale.ROOT avoids locale-dependent lowercasing, and a null
    // level falls through to the default instead of throwing an NPE.
    String level = reliabilityLevel == null ? "" : reliabilityLevel.toLowerCase(Locale.ROOT);
    switch (level) {
        case "high":
            return new DownloadRetryOptions().setMaxRetryRequests(10);
        case "medium":
            return new DownloadRetryOptions().setMaxRetryRequests(5);
        case "low":
            return new DownloadRetryOptions().setMaxRetryRequests(2);
        case "none":
            return new DownloadRetryOptions().setMaxRetryRequests(0);
        default:
            return new DownloadRetryOptions().setMaxRetryRequests(3);
    }
}
Configuration for customer-managed encryption keys.
import com.azure.storage.blob.models.CustomerProvidedKey;
import com.azure.storage.blob.models.EncryptionAlgorithmType;
// Configure customer-provided encryption key.
// Fixes: the service requires a Base64-encoded 256-bit (32-byte) AES key — a
// raw passphrase string is not a valid key. The algorithm is always AES256
// for customer-provided keys (CustomerProvidedKey exposes no setter for it).
byte[] keyBytes = new byte[32];
new SecureRandom().nextBytes(keyBytes); // in production, load the key from a secure key store
CustomerProvidedKey cpk = new CustomerProvidedKey(Base64.getEncoder().encodeToString(keyBytes));
// Create client with customer-provided key
BlobServiceClient encryptedServiceClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .customerProvidedKey(cpk)
    .buildClient();
// All blobs created through this client will use the CPK
BlobClient encryptedBlobClient = encryptedServiceClient
    .getBlobContainerClient("encrypted-container")
    .getBlobClient("encrypted-file.txt");
// Upload encrypted content
encryptedBlobClient.upload(BinaryData.fromString("Sensitive encrypted content"), true);
// Download requires the same key
BinaryData decryptedContent = encryptedBlobClient.downloadContent();
System.out.println("Decrypted content: " + decryptedContent.toString());
// Key rotation example: generate a fresh 32-byte key and re-encrypt via copy.
byte[] rotatedKeyBytes = new byte[32];
new SecureRandom().nextBytes(rotatedKeyBytes);
CustomerProvidedKey newCpk = new CustomerProvidedKey(Base64.getEncoder().encodeToString(rotatedKeyBytes));
// Create new client with rotated key
BlobServiceClient rotatedKeyClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .customerProvidedKey(newCpk)
    .buildClient();
// Copy blob with new encryption key.
// NOTE(review): copying a CPK-encrypted source by URL requires the service to
// be able to read the source (source CPK headers or a SAS URL) — verify.
BlobClient sourceBlobClient = encryptedServiceClient
    .getBlobContainerClient("encrypted-container")
    .getBlobClient("encrypted-file.txt");
BlobClient targetBlobClient = rotatedKeyClient
    .getBlobContainerClient("encrypted-container")
    .getBlobClient("re-encrypted-file.txt");
String copyId = targetBlobClient.copyFromUrl(sourceBlobClient.getBlobUrl());
System.out.println("Key rotation copy started: " + copyId);
// Configure encryption scope at service client level
// Service-client-level encryption scope: every operation issued through this
// client carries the scope automatically.
BlobServiceClient scopeAwareClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .encryptionScope("production-encryption-scope")
    .buildClient();
BlobContainerClient scopedContainer = scopeAwareClient.getBlobContainerClient("scoped-container");
BlobClient scopedBlobClient = scopedContainer.getBlobClient("scoped-file.txt");
// The scope is applied to the upload without any per-call configuration.
scopedBlobClient.upload(BinaryData.fromString("Content encrypted with scope"), true);
// Container-level default scope: force every blob in the container onto one scope.
BlobContainerEncryptionScope containerScope = new BlobContainerEncryptionScope()
    .setDefaultEncryptionScope("container-specific-scope")
    .setPreventEncryptionScopeOverride(true); // blobs may not opt out of the scope
BlobContainerCreateOptions scopedContainerOptions = new BlobContainerCreateOptions()
    .setMetadata(Map.of("encryption", "enforced"))
    .setEncryptionScope(containerScope);
containerClient.createWithResponse(scopedContainerOptions, Duration.ofMinutes(1), Context.NONE);
import com.azure.core.http.HttpClient;
import com.azure.core.http.netty.NettyAsyncHttpClientBuilder;
import com.azure.core.http.policy.*;
import com.azure.core.util.Configuration;
import java.time.Duration;
// Configure custom HTTP client with detailed settings.
// NOTE(review): only timeout settings are used here — connection-pool tuning
// (max idle/lifetime) and chunk/header sizes live on reactor-netty's
// ConnectionProvider/HttpClient, not on NettyAsyncHttpClientBuilder; the
// original example called builder methods that do not exist.
HttpClient customHttpClient = new NettyAsyncHttpClientBuilder()
    .connectTimeout(Duration.ofSeconds(60))
    .responseTimeout(Duration.ofMinutes(5))
    .readTimeout(Duration.ofMinutes(2))
    .writeTimeout(Duration.ofMinutes(2))
    .build();
// Configure request retry options (exponential backoff, capped at 30s).
// Fix: this replaces the original anonymous RetryPolicy subclass —
// azure-core's RetryPolicy has no (String, int) constructor, and the Storage
// builder rejects configuring BOTH retryPolicy(...) and retryOptions(...);
// exactly one retry configuration may be supplied.
RequestRetryOptions retryOptions = new RequestRetryOptions(
    RetryPolicyType.EXPONENTIAL,
    5, // maxTries
    Duration.ofMinutes(2), // tryTimeout
    Duration.ofSeconds(1), // retryDelay
    Duration.ofSeconds(30), // maxRetryDelay
    null // secondaryHost
);
// Create service client with comprehensive configuration
BlobServiceClient configuredClient = new BlobServiceClientBuilder()
    .endpoint("https://myaccount.blob.core.windows.net")
    .credential(new DefaultAzureCredentialBuilder().build())
    .httpClient(customHttpClient)
    .retryOptions(retryOptions) // single source of truth for retry behavior
    .addPolicy(new UserAgentPolicy("MyApplication/1.0.0"))
    .addPolicy(new RequestIdPolicy())
    .configuration(Configuration.getGlobalConfiguration())
    .serviceVersion(BlobServiceVersion.getLatest())
    .buildClient();
// Custom logging policy: allow-list the headers/params safe to log.
HttpLogOptions logOptions = new HttpLogOptions()
    .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
    .addAllowedHeaderName("x-ms-request-id")
    .addAllowedHeaderName("x-ms-version")
    .addAllowedQueryParamName("timeout");
BlobServiceClient loggingClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .httpLogOptions(logOptions)
    .buildClient();
import com.azure.core.util.Context;
import com.azure.core.util.tracing.Tracer;
// Build a Context carrying correlation values; each addData returns a new
// immutable Context, so the chain accumulates all three entries.
Context customContext = Context.NONE
    .addData("operation-id", UUID.randomUUID().toString())
    .addData("user-id", "user-12345")
    .addData("correlation-id", "corr-" + System.currentTimeMillis());
// Propagate the context through an upload so the values ride along with the request.
BlobParallelUploadOptions contextualOptions =
    new BlobParallelUploadOptions(BinaryData.fromString("Content with context"))
        .setMetadata(Map.of("operation-id",
            customContext.getData("operation-id").orElse("unknown").toString()));
Response<BlockBlobItem> contextualUpload = blobClient.uploadWithResponse(
    contextualOptions,
    Duration.ofMinutes(5),
    customContext
);
// Pull the service request ID out of the response for correlation logging.
String requestId = contextualUpload.getHeaders().getValue("x-ms-request-id");
System.out.println("Upload completed with request ID: " + requestId);
// A context entry can also carry a timeout override...
Context timeoutContext = Context.NONE.addData("timeout", Duration.ofMinutes(10));
// ...or a custom header value.
Context headerContext = Context.NONE.addData("custom-header", "custom-value");
// Comprehensive progress tracking with metrics
// Comprehensive progress tracking with per-interval speed metrics.
// NOTE(review): assumes progress callbacks arrive serialized; confirm the
// transfer pipeline does not invoke the listener concurrently before reuse.
public class AdvancedProgressTracker implements ProgressReceiver {
    private final String operationName;
    private final long totalSize;   // expected total bytes; <= 0 means unknown
    private final long startTime;   // wall-clock millis at construction
    private long lastUpdate;        // millis of the last console update
    private long bytesAtLastUpdate; // byte count at the last console update
    private final List<Double> speedSamples = new ArrayList<>(); // MB/s per interval

    public AdvancedProgressTracker(String operationName, long totalSize) {
        this.operationName = operationName;
        this.totalSize = totalSize;
        this.startTime = System.currentTimeMillis();
        this.lastUpdate = startTime;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        long currentTime = System.currentTimeMillis();
        // Throttle console output to roughly once per second; always report
        // the final callback.
        if (currentTime - lastUpdate >= 1000 || bytesTransferred == totalSize) {
            double elapsedSeconds = (currentTime - startTime) / 1000.0;
            double overallSpeed = elapsedSeconds > 0 ? (bytesTransferred / 1024.0 / 1024.0) / elapsedSeconds : 0;
            // Instantaneous speed over the last interval. Fix: the original's
            // "lastUpdate > 0" guard was always true (lastUpdate starts at
            // startTime) and recorded misleading 0-MB/s samples for
            // zero-length intervals; skip those samples instead.
            double instantSpeed = 0;
            double intervalSeconds = (currentTime - lastUpdate) / 1000.0;
            if (intervalSeconds > 0) {
                long intervalBytes = bytesTransferred - bytesAtLastUpdate;
                instantSpeed = (intervalBytes / 1024.0 / 1024.0) / intervalSeconds;
                speedSamples.add(instantSpeed);
            }
            // Fix: guard divide-by-zero when the total size is unknown/zero.
            double percentage = totalSize > 0 ? (double) bytesTransferred / totalSize * 100 : 0.0;
            // Estimate time remaining from the overall average rate.
            String eta = "unknown";
            if (overallSpeed > 0) {
                long remainingBytes = totalSize - bytesTransferred;
                long etaSeconds = (long) (remainingBytes / 1024.0 / 1024.0 / overallSpeed);
                eta = formatDuration(etaSeconds);
            }
            System.out.printf("[%s] %.1f%% (%s/%s) - Speed: %.2f MB/s (avg: %.2f MB/s) - ETA: %s%n",
                operationName,
                percentage,
                formatBytes(bytesTransferred),
                formatBytes(totalSize),
                instantSpeed,
                overallSpeed,
                eta
            );
            lastUpdate = currentTime;
            bytesAtLastUpdate = bytesTransferred;
        }
    }

    /** Human-readable byte count (B / KB / MB / GB). */
    private String formatBytes(long bytes) {
        if (bytes < 1024) return bytes + " B";
        if (bytes < 1024 * 1024) return String.format("%.1f KB", bytes / 1024.0);
        if (bytes < 1024L * 1024 * 1024) return String.format("%.1f MB", bytes / 1024.0 / 1024.0);
        return String.format("%.2f GB", bytes / 1024.0 / 1024.0 / 1024.0);
    }

    /** Compact duration format: "42s", "3m 5s", or "2h 11m". */
    private String formatDuration(long seconds) {
        if (seconds < 60) return seconds + "s";
        if (seconds < 3600) return String.format("%dm %ds", seconds / 60, seconds % 60);
        return String.format("%dh %dm", seconds / 3600, (seconds % 3600) / 60);
    }

    /** Prints aggregate statistics; call after the transfer completes or fails. */
    public void printSummary() {
        long totalTimeMs = System.currentTimeMillis() - startTime;
        double avgSpeed = speedSamples.stream().mapToDouble(Double::doubleValue).average().orElse(0.0);
        double maxSpeed = speedSamples.stream().mapToDouble(Double::doubleValue).max().orElse(0.0);
        double minSpeed = speedSamples.stream().mapToDouble(Double::doubleValue).min().orElse(0.0);
        System.out.println("\n" + operationName + " Summary:");
        System.out.printf("Total time: %s%n", formatDuration(totalTimeMs / 1000));
        System.out.printf("Average speed: %.2f MB/s%n", avgSpeed);
        System.out.printf("Max speed: %.2f MB/s%n", maxSpeed);
        System.out.printf("Min speed: %.2f MB/s%n", minSpeed);
        System.out.printf("Total transferred: %s%n", formatBytes(totalSize));
    }
}
// Usage with comprehensive monitoring
long fileSize = Files.size(Paths.get("large-file.bin"));
AdvancedProgressTracker tracker = new AdvancedProgressTracker("Large File Upload", fileSize);
ParallelTransferOptions monitoredOptions = new ParallelTransferOptions()
    .setBlockSizeLong(8 * 1024 * 1024L)
    .setMaxConcurrency(6)
    .setProgressListener(tracker);
BlobUploadFromFileOptions uploadOptions = new BlobUploadFromFileOptions("large-file.bin")
    .setParallelTransferOptions(monitoredOptions);
try {
    blobClient.uploadFromFileWithResponse(uploadOptions, Duration.ofMinutes(30), Context.NONE);
} catch (Exception ex) {
    System.err.println("Upload failed: " + ex.getMessage());
} finally {
    tracker.printSummary(); // the summary prints on success and on failure alike
}
Install with Tessl CLI
npx tessl i tessl/maven-com-azure--azure-storage-blob