Microsoft Azure client library for Blob Storage - Azure Blob Storage is Microsoft's object storage solution for the cloud, optimized for storing massive amounts of unstructured data such as text or binary data.
---
Blob clients provide comprehensive functionality for working with individual blobs, including upload, download, metadata management, and content manipulation.
The synchronous client for individual blob operations.
import com.azure.storage.blob.BlobClient;
import com.azure.storage.blob.BlobClientBuilder;
import com.azure.storage.blob.BlobContainerClient;

// --- Creating a BlobClient ---
// Each example uses a distinct variable name so the snippets compile together
// without redeclaring the same local.

// From container client (recommended)
BlobContainerClient containerClient = serviceClient.getBlobContainerClient("mycontainer");
BlobClient blobClient = containerClient.getBlobClient("myblob.txt");

// From service client
BlobClient blobFromService = serviceClient.getBlobContainerClient("mycontainer")
    .getBlobClient("myblob.txt");

// Direct creation using builder
BlobClient blobFromBuilder = new BlobClientBuilder()
    .connectionString("DefaultEndpointsProtocol=https;AccountName=myaccount;...")
    .containerName("mycontainer")
    .blobName("myblob.txt")
    .buildClient();

// With snapshot or version (the client is then scoped to that snapshot/version)
BlobClient snapshotScopedClient = new BlobClientBuilder()
    .connectionString("DefaultEndpointsProtocol=https;AccountName=myaccount;...")
    .containerName("mycontainer")
    .blobName("myblob.txt")
    .snapshot("2023-12-01T10:30:00.1234567Z")
    .buildClient();
BlobClient versionClient = new BlobClientBuilder()
    .connectionString("DefaultEndpointsProtocol=https;AccountName=myaccount;...")
    .containerName("mycontainer")
    .blobName("myblob.txt")
    .versionId("01D64EAD4C6B7A00")
    .buildClient();
import com.azure.core.util.BinaryData;
import com.azure.storage.blob.models.*;
import com.azure.storage.blob.options.*;

// --- Basic uploads ---

// Simple upload from string
String content = "Hello, Azure Blob Storage!";
blobClient.upload(BinaryData.fromString(content), true); // overwrite if exists

// Upload from byte array
byte[] data = "Binary content".getBytes(StandardCharsets.UTF_8);
blobClient.upload(BinaryData.fromBytes(data), false); // don't overwrite

// Upload from InputStream.
// NOTE: BlobClient.upload(InputStream, long, boolean) returns void. To receive
// the resulting BlockBlobItem (ETag, version id), use the BlockBlobClient overload.
try (FileInputStream fileStream = new FileInputStream("localfile.txt")) {
    long fileSize = Files.size(Paths.get("localfile.txt"));
    BlockBlobItem uploadResult =
        blobClient.getBlockBlobClient().upload(fileStream, fileSize, true);
    System.out.println("Upload completed. ETag: " + uploadResult.getETag());
}

// Upload from file
blobClient.uploadFromFile("localfile.txt");

// Upload with overwrite protection: the service rejects the write with a 409
// (BlobAlreadyExists) when overwrite is false and the blob exists.
try {
    blobClient.uploadFromFile("localfile.txt", false); // don't overwrite
} catch (BlobStorageException ex) {
    if (ex.getErrorCode() == BlobErrorCode.BLOB_ALREADY_EXISTS) {
        System.out.println("Blob already exists, skipping upload");
    } else {
        throw ex;
    }
}
// Upload with comprehensive options
// --- Upload with headers, metadata, tags, tier, and request conditions ---
BlobHttpHeaders headers = new BlobHttpHeaders()
.setContentType("text/plain")
.setContentLanguage("en-US")
.setContentEncoding("utf-8")
.setCacheControl("no-cache, max-age=0")
.setContentDisposition("attachment; filename=data.txt");
Map<String, String> metadata = Map.of(
"author", "system",
"category", "logs",
"environment", "production"
);
Map<String, String> tags = Map.of(
"department", "engineering",
"project", "webapp",
"cost-center", "12345"
);
ParallelTransferOptions transferOptions = new ParallelTransferOptions()
.setBlockSizeLong(4 * 1024 * 1024L) // 4MB blocks
.setMaxConcurrency(8)
.setProgressListener(bytesTransferred ->
System.out.println("Uploaded: " + bytesTransferred + " bytes"));
// If-None-Match: * makes the request fail when the blob already exists
BlobRequestConditions conditions = new BlobRequestConditions()
.setIfNoneMatch("*"); // Only upload if blob doesn't exist
// Upload with all options
BlobParallelUploadOptions uploadOptions = new BlobParallelUploadOptions(
BinaryData.fromString("Content with all options"))
.setHeaders(headers)
.setMetadata(metadata)
.setTags(tags)
.setTier(AccessTier.HOT)
.setParallelTransferOptions(transferOptions)
.setRequestConditions(conditions);
Response<BlockBlobItem> uploadResponse = blobClient.uploadWithResponse(
uploadOptions,
Duration.ofMinutes(10),
Context.NONE
);
System.out.println("Upload status: " + uploadResponse.getStatusCode());
System.out.println("ETag: " + uploadResponse.getValue().getETag());
// Version ID is populated only on accounts with blob versioning enabled
System.out.println("Version ID: " + uploadResponse.getValue().getVersionId());
// Upload large file with optimized settings
// Large-file upload: bigger block size and a single-shot threshold tune throughput
BlobUploadFromFileOptions fileUploadOptions = new BlobUploadFromFileOptions("largefile.dat")
.setParallelTransferOptions(new ParallelTransferOptions()
.setBlockSizeLong(8 * 1024 * 1024L) // 8MB blocks for large files
.setMaxConcurrency(6)
.setMaxSingleUploadSizeLong(256 * 1024 * 1024L)) // 256MB single upload threshold
.setHeaders(new BlobHttpHeaders()
.setContentType("application/octet-stream"))
.setMetadata(Map.of(
"upload-date", OffsetDateTime.now().toString(),
"file-size", String.valueOf(Files.size(Paths.get("largefile.dat")))))
.setTier(AccessTier.HOT)
.setRequestConditions(new BlobRequestConditions()
.setIfNoneMatch("*"));
Response<BlockBlobItem> fileUploadResponse = blobClient.uploadFromFileWithResponse(
fileUploadOptions,
Duration.ofMinutes(30),
Context.NONE
);
// Download to BinaryData
// --- Downloading ---

// Download the whole blob to BinaryData (buffers in memory; fine for small blobs)
BinaryData downloadedData = blobClient.downloadContent();
String downloadedText = downloadedData.toString();
byte[] bytes = downloadedData.toBytes();

// Download to OutputStream.
// NOTE: download(OutputStream) is deprecated and returns void — it cannot be
// assigned to BlobProperties. Use downloadStream(OutputStream) and fetch the
// properties separately.
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
    blobClient.downloadStream(outputStream);
    BlobProperties properties = blobClient.getProperties();
    byte[] downloadedBytes = outputStream.toByteArray();
    System.out.println("Downloaded " + downloadedBytes.length + " bytes");
    System.out.println("Content type: " + properties.getContentType());
}

// Download to file; returns the blob's properties
BlobProperties downloadProperties = blobClient.downloadToFile("downloaded-file.txt");
System.out.println("File downloaded, size: " + downloadProperties.getBlobSize());

// Download with overwrite protection — a pre-existing local file surfaces as
// an UncheckedIOException wrapping FileAlreadyExistsException
try {
    blobClient.downloadToFile("downloaded-file.txt", false); // don't overwrite
} catch (UncheckedIOException ex) {
    System.err.println("File already exists: " + ex.getMessage());
}

// Streaming download: process the blob incrementally without buffering it all
try (InputStream inputStream = blobClient.openInputStream()) {
    byte[] buffer = new byte[8192];
    int bytesRead;
    while ((bytesRead = inputStream.read(buffer)) != -1) {
        // Process buffer content
        System.out.println("Read " + bytesRead + " bytes");
    }
}
// Download specific range
// Download a specific byte range. BlobRange(offset, count): this reads the
// 4096 bytes at offsets 1024..5119.
// NOTE: the sync client exposes range downloads through
// downloadStreamWithResponse, not a BlobDownloadOptions-based overload.
BlobRange range = new BlobRange(1024, 4096L);
try (ByteArrayOutputStream rangeStream = new ByteArrayOutputStream()) {
    BlobDownloadResponse rangeResponse = blobClient.downloadStreamWithResponse(
        rangeStream,
        range,
        null,   // DownloadRetryOptions — use defaults
        null,   // BlobRequestConditions — none
        false,  // don't request a transactional MD5 for the range
        Duration.ofMinutes(5),
        Context.NONE
    );
    System.out.println("Downloaded range: " + rangeStream.size() + " bytes");
}

// Download to file with advanced options
DownloadRetryOptions retryOptions = new DownloadRetryOptions()
    .setMaxRetryRequests(5);
BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions("advanced-download.dat")
    .setRange(new BlobRange(0)) // Download from start
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(4 * 1024 * 1024L)
        .setMaxConcurrency(4))
    .setDownloadRetryOptions(retryOptions)
    .setRequestConditions(new BlobRequestConditions()
        .setIfMatch(blobClient.getProperties().getETag())) // fail if the blob changed meanwhile
    // The option name is setRetrieveContentRangeMd5 (per-range transactional MD5)
    .setRetrieveContentRangeMd5(true)
    .setOpenOptions(Set.of(
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING));
Response<BlobProperties> advancedDownloadResponse = blobClient.downloadToFileWithResponse(
    downloadOptions,
    Duration.ofMinutes(15),
    Context.NONE
);
System.out.println("Advanced download completed");
System.out.println("Content MD5: " + Arrays.toString(
    advancedDownloadResponse.getValue().getContentMd5()));
// Get blob properties
// --- Reading blob properties ---
BlobProperties properties = blobClient.getProperties();
System.out.println("Blob size: " + properties.getBlobSize() + " bytes");
System.out.println("Content type: " + properties.getContentType());
System.out.println("Last modified: " + properties.getLastModified());
System.out.println("ETag: " + properties.getETag());
System.out.println("Blob type: " + properties.getBlobType());
System.out.println("Access tier: " + properties.getAccessTier());
System.out.println("Creation time: " + properties.getCreationTime());
System.out.println("Is encrypted: " + properties.isServerEncrypted());
System.out.println("Metadata: " + properties.getMetadata());
// Check if blob exists. exists() returns a boxed Boolean; the unboxing in the
// condition below assumes a non-null result.
Boolean exists = blobClient.exists();
if (!exists) {
System.out.println("Blob does not exist");
return;
}
// Get properties with conditions (fails with 304/412 when the condition is not met)
BlobRequestConditions conditions = new BlobRequestConditions()
.setIfModifiedSince(OffsetDateTime.now().minusDays(1));
Response<BlobProperties> propertiesResponse = blobClient.getPropertiesWithResponse(
conditions,
Duration.ofSeconds(30),
Context.NONE
);
// Set HTTP headers
// --- HTTP headers and metadata ---
// Setting headers replaces ALL system headers on the blob in one call.
BlobHttpHeaders newHeaders = new BlobHttpHeaders()
.setContentType("application/json")
.setContentLanguage("en-US")
.setContentEncoding("gzip")
.setCacheControl("public, max-age=3600")
.setContentDisposition("inline; filename=data.json");
blobClient.setHttpHeaders(newHeaders);
// Set HTTP headers with conditions (If-Match guards against concurrent updates)
BlobRequestConditions headerConditions = new BlobRequestConditions()
.setIfMatch(properties.getETag());
Response<Void> headerResponse = blobClient.setHttpHeadersWithResponse(
newHeaders,
headerConditions,
Duration.ofSeconds(30),
Context.NONE
);
// Set metadata (setMetadata replaces the blob's entire metadata map)
Map<String, String> newMetadata = Map.of(
"processed", "true",
"processor", "batch-job-v2",
"processed-time", OffsetDateTime.now().toString(),
"checksum", "abc123def456"
);
blobClient.setMetadata(newMetadata);
// Set metadata with conditions
Response<Void> metadataResponse = blobClient.setMetadataWithResponse(
newMetadata,
new BlobRequestConditions().setIfUnmodifiedSince(OffsetDateTime.now()),
Duration.ofSeconds(30),
Context.NONE
);
// Set tags
// --- Blob index tags (setTags replaces the blob's full tag set) ---
Map<String, String> tags = Map.of(
"environment", "production",
"team", "backend",
"cost-center", "12345",
"project", "webapp",
"classification", "internal"
);
blobClient.setTags(tags);
// Get current tags
Map<String, String> currentTags = blobClient.getTags();
System.out.println("Current tags: " + currentTags);
// Set tags with options — the tags condition is a SQL-like predicate evaluated
// against the blob's EXISTING tags before the update is applied
BlobSetTagsOptions setTagsOptions = new BlobSetTagsOptions(tags)
.setRequestConditions(new BlobRequestConditions()
.setTagsConditions("\"environment\" = 'staging'"));
Response<Void> setTagsResponse = blobClient.setTagsWithResponse(
setTagsOptions,
Duration.ofSeconds(30),
Context.NONE
);
// Get tags with options
BlobGetTagsOptions getTagsOptions = new BlobGetTagsOptions()
.setRequestConditions(new BlobRequestConditions()
.setIfMatch(blobClient.getProperties().getETag()));
Response<Map<String, String>> getTagsResponse = blobClient.getTagsWithResponse(
getTagsOptions,
Duration.ofSeconds(30),
Context.NONE
);
// Set access tier for cost optimization
// --- Access tier management ---

// Set access tier for cost optimization
blobClient.setAccessTier(AccessTier.COOL);

// Set access tier with options (`leaseId` comes from an active lease on the blob;
// rehydrate priority applies when moving OUT of ARCHIVE)
BlobSetAccessTierOptions tierOptions = new BlobSetAccessTierOptions(AccessTier.ARCHIVE)
    .setPriority(RehydratePriority.STANDARD)
    .setLeaseId(leaseId)
    .setTagsConditions("\"environment\" = 'development'");
Response<Void> tierResponse = blobClient.setAccessTierWithResponse(
    tierOptions,
    Duration.ofSeconds(30),
    Context.NONE
);

// Check current access tier and archive status
BlobProperties tierProperties = blobClient.getProperties();
System.out.println("Current tier: " + tierProperties.getAccessTier());
System.out.println("Archive status: " + tierProperties.getArchiveStatus());
// The accessor on BlobProperties is isAccessTierInferred(), not getAccessTierInferred()
System.out.println("Tier inferred: " + tierProperties.isAccessTierInferred());
System.out.println("Tier change time: " + tierProperties.getAccessTierChangeTime());
// Create snapshot
// --- Snapshots ---

// createSnapshot() is inherited from the base client and returns a
// BlobClientBase scoped to the new snapshot (not a BlobClient).
BlobClientBase snapshotClient = blobClient.createSnapshot();
String snapshotId = snapshotClient.getSnapshotId();
System.out.println("Created snapshot: " + snapshotId);

// Create snapshot with metadata (the metadata is attached to the snapshot only)
Map<String, String> snapshotMetadata = Map.of(
    "snapshot-reason", "backup",
    "created-by", "automated-backup",
    "retention-days", "30"
);
Response<BlobClientBase> snapshotResponse = blobClient.createSnapshotWithResponse(
    snapshotMetadata,
    new BlobRequestConditions().setIfMatch(blobClient.getProperties().getETag()),
    Duration.ofSeconds(30),
    Context.NONE
);
BlobClientBase snapshotWithMetadata = snapshotResponse.getValue();
System.out.println("Snapshot with metadata: " + snapshotWithMetadata.getSnapshotId());

// Access an existing snapshot through the container client
BlobClient existingSnapshot = containerClient.getBlobClient(blobName, snapshotId);
BlobProperties snapshotProps = existingSnapshot.getProperties();
System.out.println("Snapshot size: " + snapshotProps.getBlobSize());
// Simple copy from another blob
// --- Copying blobs ---
String sourceUrl = "https://sourceaccount.blob.core.windows.net/sourcecontainer/sourceblob.txt";

// Synchronous server-side copy (source must be <= 256MB); returns the copy id
String copyId = blobClient.copyFromUrl(sourceUrl);
System.out.println("Copy operation completed: " + copyId);

// Synchronous copy with timeout — there is no copyFromUrl(String, Duration)
// overload; use the WithResponse variant with BlobCopyFromUrlOptions.
try {
    Response<String> syncCopyResponse = blobClient.copyFromUrlWithResponse(
        new BlobCopyFromUrlOptions(sourceUrl), Duration.ofMinutes(5), Context.NONE);
    System.out.println("Synchronous copy completed: " + syncCopyResponse.getValue());
} catch (Exception ex) {
    System.err.println("Copy operation failed: " + ex.getMessage());
}

// Asynchronous copy with polling (poll interval = 1 minute)
SyncPoller<BlobCopyInfo, Void> copyPoller = blobClient.beginCopy(sourceUrl, Duration.ofMinutes(1));
// The poller's final-result type parameter is Void, so getFinalResult() is
// useless here; take the BlobCopyInfo from the final PollResponse instead.
PollResponse<BlobCopyInfo> finalPollResponse = copyPoller.waitForCompletion(Duration.ofMinutes(30));
BlobCopyInfo finalCopyInfo = finalPollResponse.getValue();
System.out.println("Copy status: " + finalCopyInfo.getCopyStatus());
System.out.println("Copy id: " + finalCopyInfo.getCopyId()
    + ", completed at: " + finalCopyInfo.getCompletionTime());

// Copy with options
Map<String, String> copyMetadata = Map.of(
    "copy-source", sourceUrl,
    "copy-time", OffsetDateTime.now().toString()
);
BlobBeginCopyOptions copyOptions = new BlobBeginCopyOptions(sourceUrl)
    .setMetadata(copyMetadata)
    .setTier(AccessTier.HOT)
    .setRequestConditions(new BlobRequestConditions()
        .setIfNoneMatch("*"))
    .setSourceRequestConditions(new BlobRequestConditions()
        .setIfModifiedSince(OffsetDateTime.now().minusDays(1)));
SyncPoller<BlobCopyInfo, Void> advancedCopyPoller = blobClient.beginCopy(copyOptions, Duration.ofSeconds(30));
// Simple delete
// --- Deleting blobs ---
blobClient.delete();
// Delete if exists — returns whether the blob was actually removed
boolean deleted = blobClient.deleteIfExists();
if (deleted) {
System.out.println("Blob was deleted");
} else {
System.out.println("Blob did not exist");
}
// Delete with options: INCLUDE removes the blob together with all its snapshots
DeleteSnapshotsOptionType snapshotOption = DeleteSnapshotsOptionType.INCLUDE;
BlobRequestConditions deleteConditions = new BlobRequestConditions()
.setIfUnmodifiedSince(OffsetDateTime.now().plusMinutes(1));
Response<Void> deleteResponse = blobClient.deleteWithResponse(
snapshotOption,
deleteConditions,
Duration.ofSeconds(30),
Context.NONE
);
System.out.println("Delete status: " + deleteResponse.getStatusCode());
// Delete if exists with options; ONLY deletes just the snapshots and keeps the base blob.
// NOTE(review): confirm `BlobDeleteOptions` exists in the SDK version in use —
// deleteIfExistsWithResponse is also overloaded as
// (DeleteSnapshotsOptionType, BlobRequestConditions, Duration, Context).
BlobDeleteOptions deleteOptions = new BlobDeleteOptions()
.setDeleteSnapshotsOptions(DeleteSnapshotsOptionType.ONLY)
.setRequestConditions(new BlobRequestConditions()
.setLeaseId(leaseId));
Response<Boolean> deleteIfExistsResponse = blobClient.deleteIfExistsWithResponse(
deleteOptions,
Duration.ofSeconds(30),
Context.NONE
);
// Undelete soft-deleted blob (if soft delete is enabled)
// Restore a soft-deleted blob (requires soft delete enabled on the account)
try {
blobClient.undelete();
System.out.println("Blob restored from soft delete");
} catch (BlobStorageException ex) {
if (ex.getErrorCode() == BlobErrorCode.BLOB_NOT_FOUND) {
System.out.println("Blob was not found or not soft-deleted");
} else {
throw ex;
}
}
// Undelete with response
Response<Void> undeleteResponse = blobClient.undeleteWithResponse(
Duration.ofSeconds(30),
Context.NONE
);
System.out.println("Undelete status: " + undeleteResponse.getStatusCode());

The asynchronous client for blob operations using reactive programming.
import com.azure.storage.blob.BlobAsyncClient;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Flux;

// --- Creating a BlobAsyncClient ---

// From container async client
BlobContainerAsyncClient asyncContainerClient = serviceAsyncClient.getBlobContainerAsyncClient("mycontainer");
BlobAsyncClient asyncBlobClient = asyncContainerClient.getBlobAsyncClient("myblob.txt");

// NOTE(review): BlobClient does not expose a public getAsyncClient() — to get
// an async client for the same blob, build it from the same configuration.

// Direct creation (distinct variable name so the examples don't redeclare)
BlobAsyncClient asyncFromBuilder = new BlobClientBuilder()
    .connectionString("DefaultEndpointsProtocol=https;AccountName=myaccount;...")
    .containerName("mycontainer")
    .blobName("myblob.txt")
    .buildAsyncClient();
// Simple async upload
// --- Async upload (reactive: nothing executes until subscribe()) ---
Mono<BlockBlobItem> uploadMono = asyncBlobClient.upload(
BinaryData.fromString("Async upload content"),
true
);
uploadMono
.doOnSuccess(result -> System.out.println("Upload completed: " + result.getETag()))
.doOnError(ex -> System.err.println("Upload failed: " + ex.getMessage()))
.subscribe();
// Upload with progress tracking
ParallelTransferOptions asyncTransferOptions = new ParallelTransferOptions()
.setBlockSizeLong(1024 * 1024L)
.setMaxConcurrency(4)
.setProgressListener(bytesTransferred ->
System.out.println("Async progress: " + bytesTransferred + " bytes"));
BlobParallelUploadOptions asyncUploadOptions = new BlobParallelUploadOptions(
BinaryData.fromString("Large content for async upload"))
.setParallelTransferOptions(asyncTransferOptions)
.setHeaders(new BlobHttpHeaders().setContentType("text/plain"))
.setMetadata(Map.of("async", "true"));
Mono<Response<BlockBlobItem>> asyncUploadResponse = asyncBlobClient.uploadWithResponse(
asyncUploadOptions
);
asyncUploadResponse
.doOnNext(response -> {
System.out.println("Async upload status: " + response.getStatusCode());
System.out.println("ETag: " + response.getValue().getETag());
})
.subscribe();
// Download content asynchronously
// --- Async download ---
Mono<BinaryData> downloadMono = asyncBlobClient.downloadContent();
downloadMono
.doOnNext(data -> System.out.println("Downloaded " + data.getLength() + " bytes"))
.doOnError(ex -> System.err.println("Download failed: " + ex.getMessage()))
.subscribe();
// Streaming download: each emitted ByteBuffer is one chunk of the blob
Flux<ByteBuffer> downloadFlux = asyncBlobClient.downloadStream();
downloadFlux
.doOnNext(buffer -> {
System.out.println("Received buffer: " + buffer.remaining() + " bytes");
// Process buffer
})
.doOnComplete(() -> System.out.println("Streaming download complete"))
.doOnError(ex -> System.err.println("Stream error: " + ex.getMessage()))
.subscribe();
// Download with reactive chains: download, inspect, then write metadata
asyncBlobClient.downloadContent()
.map(BinaryData::toString)
.filter(content -> content.contains("important"))
.doOnNext(content -> System.out.println("Processing important content"))
.flatMap(content -> {
// Chain with another async operation
return asyncBlobClient.setMetadata(Map.of("processed", "true"));
})
.subscribe();
// Get properties asynchronously
// --- Async property access and composition ---
Mono<BlobProperties> propertiesMono = asyncBlobClient.getProperties();
propertiesMono
.doOnNext(props -> {
System.out.println("Async - Blob size: " + props.getBlobSize());
System.out.println("Async - Content type: " + props.getContentType());
System.out.println("Async - Last modified: " + props.getLastModified());
})
.subscribe();
// Chain multiple operations: move blobs larger than 1MB to the cool tier
asyncBlobClient.exists()
.filter(exists -> exists)
.flatMap(exists -> asyncBlobClient.getProperties())
.flatMap(props -> {
if (props.getBlobSize() > 1024 * 1024) { // > 1MB
return asyncBlobClient.setAccessTier(AccessTier.COOL);
} else {
return Mono.empty();
}
})
.doOnSuccess(v -> System.out.println("Large blob moved to cool tier"))
.subscribe();
// Parallel operations: zip subscribes to both Monos and combines their results
Mono<BlobProperties> properties = asyncBlobClient.getProperties();
Mono<Map<String, String>> tags = asyncBlobClient.getTags();
Mono.zip(properties, tags)
.doOnNext(tuple -> {
BlobProperties props = tuple.getT1();
Map<String, String> blobTags = tuple.getT2();
System.out.println("Blob: " + props.getBlobSize() + " bytes, " +
blobTags.size() + " tags");
})
.subscribe();
// Comprehensive error handling
// --- Reactive error handling ---
// onErrorResume substitutes a fallback publisher for matching error types.
asyncBlobClient.upload(BinaryData.fromString("content"), true)
.doOnSuccess(result -> System.out.println("Upload successful"))
.onErrorResume(BlobStorageException.class, ex -> {
System.err.println("Storage error: " + ex.getErrorCode());
if (ex.getStatusCode() == 409) {
System.out.println("Blob already exists, trying to update metadata instead");
return asyncBlobClient.setMetadata(Map.of("updated", "true"))
.then(Mono.empty()); // Convert to empty result
}
return Mono.error(ex);
})
.onErrorResume(Exception.class, ex -> {
System.err.println("Unexpected error: " + ex.getMessage());
return Mono.error(new RuntimeException("Upload operation failed", ex));
})
.subscribe();
// Retry with backoff (Retry is reactor.util.retry.Retry)
asyncBlobClient.getProperties()
.retryWhen(Retry.backoff(3, Duration.ofSeconds(1))) // Retry up to 3 times with exponential backoff
.doOnNext(props -> System.out.println("Properties retrieved after potential retries"))
.subscribe();
// Get specialized clients from base blob client
// --- Specialized blob clients (block / append / page) ---
BlockBlobClient blockBlobClient = blobClient.getBlockBlobClient();
AppendBlobClient appendBlobClient = blobClient.getAppendBlobClient();
PageBlobClient pageBlobClient = blobClient.getPageBlobClient();
// Async versions
BlockBlobAsyncClient asyncBlockBlobClient = asyncBlobClient.getBlockBlobAsyncClient();
AppendBlobAsyncClient asyncAppendBlobClient = asyncBlobClient.getAppendBlobAsyncClient();
PageBlobAsyncClient asyncPageBlobClient = asyncBlobClient.getPageBlobAsyncClient();
// Use specialized operations
blockBlobClient.upload(BinaryData.fromString("Block blob content"), true);
appendBlobClient.create();
appendBlobClient.appendBlock(BinaryData.fromString("Append content"));
// Page blob size must be a multiple of 512 bytes
pageBlobClient.create(512 * 1024); // 512KB page blob
// Get blob URL
// --- Blob URLs and SAS generation ---
String blobUrl = blobClient.getBlobUrl();
System.out.println("Blob URL: " + blobUrl);
// Parse blob URL into its components
BlobUrlParts urlParts = BlobUrlParts.parse(blobUrl);
System.out.println("Account name: " + urlParts.getAccountName());
System.out.println("Container: " + urlParts.getBlobContainerName());
System.out.println("Blob name: " + urlParts.getBlobName());
// Generate SAS token — requires the client to be authenticated with a
// shared key credential (generateSas throws otherwise)
OffsetDateTime expiryTime = OffsetDateTime.now().plusHours(1);
BlobSasPermission permission = new BlobSasPermission()
.setReadPermission(true)
.setWritePermission(true);
BlobServiceSasSignatureValues sasValues = new BlobServiceSasSignatureValues(expiryTime, permission)
.setStartTime(OffsetDateTime.now())
.setProtocol(SasProtocol.HTTPS_ONLY);
String sasToken = blobClient.generateSas(sasValues);
String sasUrl = blobUrl + "?" + sasToken;
System.out.println("SAS URL: " + sasUrl);
// Important blob constants
// Default transfer-size constants exposed on BlobClient/BlobAsyncClient:
//   BlobClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE      = 4 * 1024 * 1024   (4MB)
//   BlobClient.BLOB_DEFAULT_NUMBER_OF_BUFFERS      = 8
//   BlobClient.BLOB_DEFAULT_HTBB_UPLOAD_BLOCK_SIZE = 100 * 1024 * 1024 (100MB, high-throughput)
// Use the constants for configuration. setBlockSizeLong takes a Long, so the
// int constant must be widened explicitly — int does not auto-box to Long.
ParallelTransferOptions transferOptions = new ParallelTransferOptions()
    .setBlockSizeLong((long) BlobClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE)
    .setMaxConcurrency(BlobClient.BLOB_DEFAULT_NUMBER_OF_BUFFERS);
// Get storage account information through blob client
// --- Storage account information ---
StorageAccountInfo accountInfo = blobClient.getAccountInfo();
System.out.println("Account kind: " + accountInfo.getAccountKind());
System.out.println("SKU name: " + accountInfo.getSkuName());
System.out.println("Hierarchical namespace: " + accountInfo.isHierarchicalNamespaceEnabled());
// Get account info with response details
Response<StorageAccountInfo> accountInfoResponse = blobClient.getAccountInfoWithResponse(
Duration.ofSeconds(30),
Context.NONE
);
System.out.println("Response status: " + accountInfoResponse.getStatusCode());
System.out.println("Request ID: " + accountInfoResponse.getHeaders().getValue("x-ms-request-id"));

Install with Tessl CLI
npx tessl i tessl/maven-com-azure--azure-storage-blob