Microsoft Azure client library for Blob Storage - Azure Blob Storage is Microsoft's object storage solution for the cloud, optimized for storing massive amounts of unstructured data such as text or binary data.
—
Azure Blob Storage supports three specialized blob types, each optimized for different use cases. This documentation covers BlockBlobClient, AppendBlobClient, and PageBlobClient for both synchronous and asynchronous operations.
Block blobs are optimized for uploading large amounts of data efficiently and are ideal for streaming scenarios.
import com.azure.storage.blob.specialized.BlockBlobAsyncClient;
import com.azure.storage.blob.specialized.BlockBlobClient;
import com.azure.storage.blob.specialized.SpecializedBlobClientBuilder;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
// Obtain a BlockBlobClient — three equivalent alternatives are shown.
// From base blob client (recommended)
BlobClient blobClient = containerClient.getBlobClient("document.pdf");
BlockBlobClient blockBlobClient = blobClient.getBlockBlobClient();
// Using specialized builder
// NOTE(review): redeclares blockBlobClient — these snippets are alternatives,
// not one compilable scope; pick one approach in real code.
BlockBlobClient blockBlobClient = new SpecializedBlobClientBuilder()
.blobClient(blobClient)
.buildBlockBlobClient();
// Async version (reactive, returns Mono/Flux from operations)
BlockBlobAsyncClient asyncBlockBlobClient = new SpecializedBlobClientBuilder()
.blobAsyncClient(blobAsyncClient)
.buildBlockBlobAsyncClient();import com.azure.storage.blob.models.*;
import com.azure.core.util.BinaryData;
// Simple upload (entire blob in one operation; suitable for small payloads)
String content = "Complete document content for block blob";
BlockBlobItem uploadResult = blockBlobClient.upload(
BinaryData.fromString(content),
true // overwrite if exists
);
// The returned BlockBlobItem carries the new ETag, server-computed MD5, and
// (when versioning is enabled on the account) the new version id.
System.out.println("Block blob uploaded: " + uploadResult.getETag());
System.out.println("Content MD5: " + Arrays.toString(uploadResult.getContentMd5()));
System.out.println("Version ID: " + uploadResult.getVersionId());
// Upload with comprehensive options.
// FIX: the multi-argument uploadWithResponse overload takes an InputStream plus
// an explicit length — the original passed BinaryData together with a length,
// which matches no overload and would not compile.
BlobHttpHeaders headers = new BlobHttpHeaders()
    .setContentType("application/pdf")
    .setContentDisposition("attachment; filename=document.pdf")
    .setCacheControl("public, max-age=31536000");
Map<String, String> metadata = Map.of(
    "document-type", "invoice",
    "department", "accounting",
    "year", "2023"
);
// "If-None-Match: *" makes the request fail (409) when the blob already exists.
BlobRequestConditions conditions = new BlobRequestConditions()
    .setIfNoneMatch("*");
byte[] contentBytes = content.getBytes(StandardCharsets.UTF_8);
Response<BlockBlobItem> uploadResponse = blockBlobClient.uploadWithResponse(
    new ByteArrayInputStream(contentBytes),
    contentBytes.length, // exact byte length is required for this overload
    headers,
    metadata,
    AccessTier.HOT,
    null, // contentMd5 — skip server-side MD5 validation
    conditions,
    Duration.ofMinutes(10),
    Context.NONE
);
System.out.println("Upload status: " + uploadResponse.getStatusCode());// Upload from another URL (server-side copy)
// NOTE(review): the source URL must be publicly readable or carry a SAS token —
// the copy happens server-side, so the client never downloads the bytes.
String sourceUrl = "https://example.com/source-file.pdf";
BlockBlobItem urlUploadResult = blockBlobClient.uploadFromUrl(sourceUrl);
System.out.println("Upload from URL completed: " + urlUploadResult.getETag());
// Upload from URL with options: headers/metadata/tags applied to the copy,
// destination guarded by If-None-Match, source guarded by If-Modified-Since.
BlobUploadFromUrlOptions urlOptions = new BlobUploadFromUrlOptions(sourceUrl)
.setHeaders(new BlobHttpHeaders().setContentType("application/pdf"))
.setMetadata(Map.of("source", "external"))
.setTags(Map.of("imported", "true"))
.setTier(AccessTier.HOT)
.setRequestConditions(new BlobRequestConditions().setIfNoneMatch("*"))
.setSourceRequestConditions(new BlobRequestConditions()
.setIfModifiedSince(OffsetDateTime.now().minusDays(1)));
Response<BlockBlobItem> urlUploadResponse = blockBlobClient.uploadFromUrlWithResponse(
urlOptions,
Duration.ofMinutes(5),
Context.NONE
);import java.util.Base64;
import java.util.List;
import java.util.ArrayList;
// Stage individual blocks (for large file uploads).
// Block IDs must be Base64-encoded and, within one blob, all IDs must have the
// same encoded length — hence the uniform "block-NNN" pattern below.
List<String> blockIds = new ArrayList<>();
// Block 1
String blockId1 = Base64.getEncoder().encodeToString("block-001".getBytes());
byte[] blockData1 = "First part of the content".getBytes();
blockBlobClient.stageBlock(blockId1, BinaryData.fromBytes(blockData1));
blockIds.add(blockId1);
// Block 2
String blockId2 = Base64.getEncoder().encodeToString("block-002".getBytes());
byte[] blockData2 = "Second part of the content".getBytes();
blockBlobClient.stageBlock(blockId2, BinaryData.fromBytes(blockData2));
blockIds.add(blockId2);
// Stage block with options (per-block MD5 lets the service verify integrity).
// NOTE(review): MessageDigest.getInstance throws the checked
// NoSuchAlgorithmException, which this snippet leaves unhandled.
String blockId3 = Base64.getEncoder().encodeToString("block-003".getBytes());
byte[] blockData3 = "Third part with checksum".getBytes();
byte[] blockMd5 = MessageDigest.getInstance("MD5").digest(blockData3);
Response<Void> stageResponse = blockBlobClient.stageBlockWithResponse(
blockId3,
BinaryData.fromBytes(blockData3),
blockMd5,
null, // leaseId
Duration.ofMinutes(2),
Context.NONE
);
blockIds.add(blockId3);
// Commit all blocks to create the final blob — the order of IDs in the list
// defines the byte order of the resulting blob.
BlockBlobItem commitResult = blockBlobClient.commitBlockList(blockIds);
System.out.println("Blocks committed: " + commitResult.getETag());
// Commit with additional options
Response<BlockBlobItem> commitResponse = blockBlobClient.commitBlockListWithResponse(
blockIds,
new BlobHttpHeaders().setContentType("text/plain"),
Map.of("blocks-count", String.valueOf(blockIds.size())),
AccessTier.HOT,
new BlobRequestConditions().setIfNoneMatch("*"),
Duration.ofMinutes(5),
Context.NONE
);// List current blocks
// Inspect staged vs. committed blocks (ALL returns both lists).
BlockList blockList = blockBlobClient.listBlocks(BlockListType.ALL);
System.out.println("Committed blocks:");
for (Block block : blockList.getCommittedBlocks()) {
System.out.println(" Block ID: " + block.getName());
System.out.println(" Size: " + block.getSizeLong() + " bytes");
}
System.out.println("Uncommitted blocks:");
for (Block block : blockList.getUncommittedBlocks()) {
System.out.println(" Block ID: " + block.getName());
System.out.println(" Size: " + block.getSizeLong() + " bytes");
}
// List blocks with lease ID
// NOTE(review): acquiredLeaseId is a placeholder defined by an earlier lease
// example, not by this snippet.
String leaseId = acquiredLeaseId; // From lease operations
Response<BlockList> blockListResponse = blockBlobClient.listBlocksWithResponse(
BlockListType.COMMITTED,
leaseId,
Duration.ofSeconds(30),
Context.NONE
);
// Stage block from URL (server-side fetch of a byte range from another blob)
String sourceBlockUrl = "https://source.blob.core.windows.net/container/source.txt";
BlobRange sourceRange = new BlobRange(0, 1024L); // First 1KB
blockBlobClient.stageBlockFromUrl(blockId1, sourceBlockUrl, sourceRange);
// Stage block from URL with options
// NOTE(review): sourceMd5Hash and sourceETag are placeholders — compute/fetch
// them from the source blob before running this.
BlockBlobStageBlockFromUrlOptions stageFromUrlOptions = new BlockBlobStageBlockFromUrlOptions(
blockId2, sourceBlockUrl)
.setSourceRange(new BlobRange(1024, 1024L)) // Next 1KB
.setSourceContentMd5(sourceMd5Hash)
.setLeaseId(leaseId)
.setSourceRequestConditions(new BlobRequestConditions()
.setIfMatch(sourceETag));
Response<Void> stageFromUrlResponse = blockBlobClient.stageBlockFromUrlWithResponse(
stageFromUrlOptions,
Duration.ofMinutes(2),
Context.NONE
);// Get output stream for writing
// Open a write stream over the block blob; blocks are staged as data is
// written and committed automatically when the stream is closed.
BlobOutputStream blobOutputStream = blockBlobClient.getBlobOutputStream();
try (blobOutputStream) {
// Write data in chunks
blobOutputStream.write("First chunk of data\n".getBytes());
blobOutputStream.write("Second chunk of data\n".getBytes());
blobOutputStream.write("Final chunk of data\n".getBytes());
// Stream automatically commits blocks when closed
} catch (IOException ex) {
System.err.println("Stream write failed: " + ex.getMessage());
}
// Get output stream with options: transfer tuning, HTTP headers, metadata,
// access tier, and a precondition so the stream only creates a new blob.
ParallelTransferOptions streamTransferOptions = new ParallelTransferOptions()
    .setBlockSizeLong(2 * 1024 * 1024L) // 2MB blocks
    .setMaxConcurrency(4);
BlobHttpHeaders streamHeaders = new BlobHttpHeaders()
    .setContentType("text/plain")
    .setContentEncoding("utf-8");
Map<String, String> streamMetadata = Map.of(
    "streaming", "true",
    "created", OffsetDateTime.now().toString()
);
BlobOutputStream configuredStream = blockBlobClient.getBlobOutputStream(
    streamTransferOptions,
    streamHeaders,
    streamMetadata,
    AccessTier.HOT,
    new BlobRequestConditions().setIfNoneMatch("*"),
    Context.NONE
);
// FIX: write(byte[]) throws the checked IOException, but the original
// try-with-resources had no catch clause, so the snippet would not compile.
try (configuredStream) {
    // Write a large amount of data; blocks are committed on close()
    for (int i = 0; i < 1000; i++) {
        configuredStream.write(("Line " + i + " of streaming data\n").getBytes());
    }
} catch (IOException ex) {
    System.err.println("Configured stream write failed: " + ex.getMessage());
}
Append blobs are optimized for append operations and are ideal for logging scenarios.
import com.azure.storage.blob.specialized.AppendBlobClient;
import com.azure.storage.blob.specialized.AppendBlobAsyncClient;
// Obtain an AppendBlobClient — alternative approaches shown.
// From base blob client
AppendBlobClient appendBlobClient = blobClient.getAppendBlobClient();
// Using specialized builder
// NOTE(review): redeclares appendBlobClient — alternatives, not one scope.
AppendBlobClient appendBlobClient = new SpecializedBlobClientBuilder()
.blobClient(blobClient)
.buildAppendBlobClient();
// Async version
AppendBlobAsyncClient asyncAppendBlobClient = new SpecializedBlobClientBuilder()
.blobAsyncClient(blobAsyncClient)
.buildAppendBlobAsyncClient();// Create append blob
// Create an empty (0-length) append blob; appends add data later.
AppendBlobItem createResult = appendBlobClient.create();
System.out.println("Append blob created: " + createResult.getETag());
// Create with options
BlobHttpHeaders appendHeaders = new BlobHttpHeaders()
.setContentType("text/plain")
.setContentEncoding("utf-8");
Map<String, String> appendMetadata = Map.of(
"log-type", "application",
"service", "web-api",
"environment", "production"
);
Response<AppendBlobItem> createResponse = appendBlobClient.createWithResponse(
appendHeaders,
appendMetadata,
new BlobRequestConditions().setIfNoneMatch("*"),
Duration.ofSeconds(30),
Context.NONE
);
// Create if not exists
// NOTE(review): this null check assumes createIfNotExists returns null when
// the blob already exists — confirm against the SDK javadoc for your version.
AppendBlobItem createIfNotExistsResult = appendBlobClient.createIfNotExists();
if (createIfNotExistsResult != null) {
System.out.println("New append blob created");
} else {
System.out.println("Append blob already exists");
}
// Create if not exists with options
AppendBlobCreateOptions createOptions = new AppendBlobCreateOptions()
.setHeaders(appendHeaders)
.setMetadata(appendMetadata);
Response<AppendBlobItem> createIfNotExistsResponse = appendBlobClient.createIfNotExistsWithResponse(
createOptions,
Duration.ofSeconds(30),
Context.NONE
);// Simple append operation
// Append a single log entry; the result reports where the block landed and
// how many blocks the blob now contains (max 50,000 per append blob).
String logEntry = "[" + OffsetDateTime.now() + "] INFO: User logged in\n";
AppendBlobItem appendResult = appendBlobClient.appendBlock(BinaryData.fromString(logEntry));
System.out.println("Append completed: " + appendResult.getBlobAppendOffset());
System.out.println("Blob committed block count: " + appendResult.getBlobCommittedBlockCount());
// Append with optimistic-concurrency conditions: succeed only if the blob is
// unchanged (ETag match) and the write lands at the expected offset.
// FIX: getBlobAppendOffset() returns a String in the Java SDK, so the original
// `offset + length` was string concatenation handed to a Long parameter —
// parse the offset before doing arithmetic.
AppendBlobRequestConditions appendConditions = new AppendBlobRequestConditions()
    .setIfMatch(appendResult.getETag())
    .setAppendPositionEqual(
        Long.parseLong(appendResult.getBlobAppendOffset())
            + logEntry.getBytes(StandardCharsets.UTF_8).length);
String nextLogEntry = "[" + OffsetDateTime.now() + "] WARN: High memory usage\n";
byte[] logBytes = nextLogEntry.getBytes(StandardCharsets.UTF_8);
byte[] logMd5 = MessageDigest.getInstance("MD5").digest(logBytes);
Response<AppendBlobItem> appendResponse = appendBlobClient.appendBlockWithResponse(
    BinaryData.fromBytes(logBytes),
    logMd5, // lets the service verify the payload integrity
    appendConditions,
    Duration.ofSeconds(30),
    Context.NONE
);
// Append from URL (server-side fetch of a range from another blob)
String sourceLogUrl = "https://source.blob.core.windows.net/logs/app.log";
// NOTE(review): redeclares sourceRange (also declared in the block-blob
// snippet above) — these are independent examples, not one scope.
BlobRange sourceRange = new BlobRange(0, 1024L); // First 1KB
AppendBlobItem urlAppendResult = appendBlobClient.appendBlockFromUrl(sourceLogUrl, sourceRange);
// Append from URL with options.
// NOTE(review): sourceMd5 is a placeholder — compute it from the source data.
AppendBlobAppendBlockFromUrlOptions urlAppendOptions = new AppendBlobAppendBlockFromUrlOptions(
sourceLogUrl, sourceRange)
.setSourceContentMd5(sourceMd5)
.setDestinationRequestConditions(new AppendBlobRequestConditions()
.setMaxSize(100 * 1024 * 1024L)) // Max 100MB — fail the append if blob would exceed this
.setSourceRequestConditions(new BlobRequestConditions()
.setIfUnmodifiedSince(OffsetDateTime.now()));
Response<AppendBlobItem> urlAppendResponse = appendBlobClient.appendBlockFromUrlWithResponse(
urlAppendOptions,
Duration.ofMinutes(2),
Context.NONE
);// Seal append blob (make it read-only)
// Sealing permanently blocks further appends (reads still work).
appendBlobClient.seal();
System.out.println("Append blob sealed - no more appends allowed");
// Seal with conditions
// NOTE(review): currentETag and leaseId are placeholders supplied by earlier
// examples, not defined in this snippet.
AppendBlobSealOptions sealOptions = new AppendBlobSealOptions()
.setRequestConditions(new AppendBlobRequestConditions()
.setIfMatch(currentETag)
.setLeaseId(leaseId));
Response<Void> sealResponse = appendBlobClient.sealWithResponse(
sealOptions,
Duration.ofSeconds(30),
Context.NONE
);
System.out.println("Seal operation status: " + sealResponse.getStatusCode());
// Check if blob is sealed
BlobProperties properties = appendBlobClient.getProperties();
if (properties.isSealed()) {
System.out.println("Blob is sealed");
} else {
System.out.println("Blob is not sealed");
}// Complete logging example
// Complete logging example: a minimal logger that appends formatted, timestamped
// entries to an Azure append blob — one appendBlock call per message.
public class BlobLogger {
    private final String entryFormat = "[%s] %s: %s%n";
    private final AppendBlobClient logBlobClient;

    // Resolves the append blob for logFileName and creates it when absent.
    public BlobLogger(BlobContainerClient containerClient, String logFileName) {
        this.logBlobClient =
            containerClient.getBlobClient(logFileName).getAppendBlobClient();
        try {
            // Ensure the backing blob exists before the first append.
            logBlobClient.createIfNotExists();
        } catch (Exception ex) {
            throw new RuntimeException("Failed to initialize log file", ex);
        }
    }

    // Formats and appends one entry; failures are reported, never rethrown.
    public void log(String level, String message) {
        String entry = String.format(entryFormat,
            OffsetDateTime.now(), level, message);
        try {
            AppendBlobItem appended =
                logBlobClient.appendBlock(BinaryData.fromString(entry));
            System.out.println("Log written at offset: " + appended.getBlobAppendOffset());
        } catch (Exception ex) {
            System.err.println("Failed to write log: " + ex.getMessage());
        }
    }

    public void info(String message) {
        log("INFO", message);
    }

    public void warn(String message) {
        log("WARN", message);
    }

    public void error(String message) {
        log("ERROR", message);
    }
}
// Usage: each call appends one timestamped line to application.log
BlobLogger logger = new BlobLogger(containerClient, "application.log");
logger.info("Application started");
logger.warn("High memory usage detected");
logger.error("Database connection failed");Page blobs provide random read/write access and are ideal for VHD files and database scenarios.
import com.azure.storage.blob.specialized.PageBlobClient;
import com.azure.storage.blob.specialized.PageBlobAsyncClient;
// Obtain a PageBlobClient — alternative approaches shown.
// From base blob client
PageBlobClient pageBlobClient = blobClient.getPageBlobClient();
// Using specialized builder
// NOTE(review): redeclares pageBlobClient — alternatives, not one scope.
PageBlobClient pageBlobClient = new SpecializedBlobClientBuilder()
.blobClient(blobClient)
.buildPageBlobClient();
// Async version
PageBlobAsyncClient asyncPageBlobClient = new SpecializedBlobClientBuilder()
.blobAsyncClient(blobAsyncClient)
.buildPageBlobAsyncClient();// Create page blob with specific size (must be multiple of 512 bytes)
// Page blob size must be a multiple of 512 bytes; space is allocated lazily.
long pageBlobSize = 1024 * 1024 * 512L; // 512MB
PageBlobItem createResult = pageBlobClient.create(pageBlobSize);
System.out.println("Page blob created: " + createResult.getETag());
System.out.println("Sequence number: " + createResult.getBlobSequenceNumber());
// Create with options.
BlobHttpHeaders pageHeaders = new BlobHttpHeaders()
    .setContentType("application/octet-stream");
Map<String, String> pageMetadata = Map.of(
    "type", "vhd",
    "vm-name", "web-server-01",
    "size-mb", String.valueOf(pageBlobSize / (1024 * 1024))
);
Long sequenceNumber = 0L; // initial x-ms-blob-sequence-number
// FIX: PageBlobClient.createWithResponse(size, sequenceNumber, headers,
// metadata, requestConditions, timeout, context) takes no AccessTier argument —
// the original passed AccessTier.PREMIUM, which matches no overload. Premium
// page-blob tiers are expressed separately via PremiumPageBlobAccessTier.
Response<PageBlobItem> createResponse = pageBlobClient.createWithResponse(
    pageBlobSize,
    sequenceNumber,
    pageHeaders,
    pageMetadata,
    new BlobRequestConditions().setIfNoneMatch("*"),
    Duration.ofMinutes(2),
    Context.NONE
);
// Create if not exists
// NOTE(review): this null check assumes createIfNotExists returns null when
// the blob already exists — confirm against the SDK javadoc for your version.
PageBlobItem createIfNotExistsResult = pageBlobClient.createIfNotExists(pageBlobSize);
if (createIfNotExistsResult != null) {
System.out.println("New page blob created");
} else {
System.out.println("Page blob already exists");
}
// Create if not exists with options
// NOTE(review): premium page-blob tiers are normally expressed as
// PremiumPageBlobAccessTier (P4–P80); verify that PageBlobCreateOptions exposes
// setTier(AccessTier.PREMIUM) in your SDK version before relying on this line.
PageBlobCreateOptions createOptions = new PageBlobCreateOptions(pageBlobSize)
.setSequenceNumber(0L)
.setHeaders(pageHeaders)
.setMetadata(pageMetadata)
.setTier(AccessTier.PREMIUM);
Response<PageBlobItem> createIfNotExistsResponse = pageBlobClient.createIfNotExistsWithResponse(
createOptions,
Duration.ofMinutes(2),
Context.NONE
);import com.azure.storage.blob.models.PageRange;
// Upload pages. Ranges must be 512-byte aligned: start on a multiple of 512 and
// end (inclusive) on a multiple of 512 minus 1.
PageRange pageRange1 = new PageRange().setStart(0).setEnd(511); // First page (512 bytes)
byte[] pageData1 = new byte[512];
Arrays.fill(pageData1, (byte) 0xAA); // Fill with pattern
PageBlobItem uploadResult = pageBlobClient.uploadPages(pageRange1, BinaryData.fromBytes(pageData1));
System.out.println("Page uploaded: " + uploadResult.getETag());
// Upload with per-request MD5 and a sequence-number precondition.
// NOTE(review): MessageDigest.getInstance throws the checked
// NoSuchAlgorithmException, left unhandled in this snippet. Also confirm your
// SDK version has a BinaryData overload of uploadPagesWithResponse (older
// versions take an InputStream).
PageRange pageRange2 = new PageRange().setStart(512).setEnd(1023); // Second page
byte[] pageData2 = new byte[512];
Arrays.fill(pageData2, (byte) 0xBB);
byte[] pageMd5 = MessageDigest.getInstance("MD5").digest(pageData2);
PageBlobRequestConditions pageConditions = new PageBlobRequestConditions()
.setIfSequenceNumberEqual(uploadResult.getBlobSequenceNumber());
Response<PageBlobItem> uploadResponse = pageBlobClient.uploadPagesWithResponse(
pageRange2,
BinaryData.fromBytes(pageData2),
pageMd5,
pageConditions,
Duration.ofMinutes(2),
Context.NONE
);
// Upload pages from URL (server-side copy of a 512-aligned range)
String sourcePageUrl = "https://source.blob.core.windows.net/vhds/template.vhd";
Long sourceOffset = 0L;
PageBlobItem urlUploadResult = pageBlobClient.uploadPagesFromUrl(
pageRange1,
sourcePageUrl,
sourceOffset
);
// Upload pages from URL with options
// NOTE(review): sourceMd5 is a placeholder — compute it from the source range.
PageBlobUploadPagesFromUrlOptions urlUploadOptions = new PageBlobUploadPagesFromUrlOptions(
pageRange2, sourcePageUrl, sourceOffset + 512L)
.setSourceContentMd5(sourceMd5)
.setDestinationRequestConditions(new PageBlobRequestConditions()
.setIfMatch(uploadResult.getETag()))
.setSourceRequestConditions(new BlobRequestConditions()
.setIfUnmodifiedSince(OffsetDateTime.now()));
Response<PageBlobItem> urlUploadResponse = pageBlobClient.uploadPagesFromUrlWithResponse(
urlUploadOptions,
Duration.ofMinutes(5),
Context.NONE
);// Get page ranges (find which pages contain data)
// Enumerate which pages hold data vs. which are clear (deallocated).
// NOTE(review): getPageRanges/getPageRangesDiff are deprecated in newer SDK
// versions in favor of listPageRanges/listPageRangesDiff — verify per version.
PageList pageList = pageBlobClient.getPageRanges(new BlobRange(0, 2048L));
System.out.println("Page ranges with data:");
for (PageRange range : pageList.getPageRanges()) {
System.out.println(" Pages " + range.getStart() + " to " + range.getEnd());
}
System.out.println("Clear page ranges:");
for (PageRange range : pageList.getClearRanges()) {
System.out.println(" Clear pages " + range.getStart() + " to " + range.getEnd());
}
// Get page ranges with conditions
// NOTE(review): currentETag is a placeholder from an earlier example.
Response<PageList> pageListResponse = pageBlobClient.getPageRangesWithResponse(
new BlobRange(0),
new BlobRequestConditions().setIfMatch(currentETag),
Duration.ofSeconds(30),
Context.NONE
);
// Clear pages (deallocates storage for the range; reads return zeros)
PageRange clearRange = new PageRange().setStart(1024).setEnd(1535); // Third page
PageBlobItem clearResult = pageBlobClient.clearPages(clearRange);
System.out.println("Pages cleared: " + clearResult.getETag());
// Clear pages with conditions
Response<PageBlobItem> clearResponse = pageBlobClient.clearPagesWithResponse(
clearRange,
new PageBlobRequestConditions().setIfSequenceNumberLessThan(100L),
Duration.ofSeconds(30),
Context.NONE
);
// Get page ranges diff (pages changed since a snapshot — basis for
// incremental backup of VHDs)
String previousSnapshot = "2023-12-01T10:30:00.0000000Z";
PageList pageRangesDiff = pageBlobClient.getPageRangesDiff(
new BlobRange(0),
previousSnapshot
);
System.out.println("Pages modified since snapshot:");
for (PageRange modifiedRange : pageRangesDiff.getPageRanges()) {
System.out.println(" Modified: " + modifiedRange.getStart() + " to " + modifiedRange.getEnd());
}// Resize page blob
}// Resize page blob
long newSize = 2 * 1024 * 1024 * 512L; // 1GB
PageBlobItem resizeResult = pageBlobClient.resize(newSize);
System.out.println("Blob resized: " + resizeResult.getETag());
// Resize with conditions
Response<PageBlobItem> resizeResponse = pageBlobClient.resizeWithResponse(
newSize,
new BlobRequestConditions().setIfUnmodifiedSince(OffsetDateTime.now()),
Duration.ofMinutes(2),
Context.NONE
);
// Get current size
BlobProperties properties = pageBlobClient.getProperties();
System.out.println("Current size: " + properties.getBlobSize() + " bytes");
System.out.println("Current sequence number: " + properties.getBlobSequenceNumber());import com.azure.storage.blob.models.SequenceNumberActionType;
// Update sequence number (used for optimistic concurrency control on page blobs)
Long newSequenceNumber = 42L;
// Set to a specific value
PageBlobItem updateResult = pageBlobClient.updateSequenceNumber(
    SequenceNumberActionType.UPDATE,
    newSequenceNumber
);
// Increment current value by 1 (the number argument is ignored)
PageBlobItem incrementResult = pageBlobClient.updateSequenceNumber(
    SequenceNumberActionType.INCREMENT,
    null // ignored for increment
);
// Set to max(current, provided)
PageBlobItem maxResult = pageBlobClient.updateSequenceNumber(
    SequenceNumberActionType.MAX,
    newSequenceNumber
);
System.out.println("New sequence number: " + updateResult.getBlobSequenceNumber());
// Conditional update.
// FIX: sequence-number preconditions live on PageBlobRequestConditions — and
// updateSequenceNumberWithResponse expects that type. The original used
// BlobRequestConditions, which has no setIfSequenceNumberEqual and would not
// compile.
Response<PageBlobItem> updateResponse = pageBlobClient.updateSequenceNumberWithResponse(
    SequenceNumberActionType.UPDATE,
    100L,
    new PageBlobRequestConditions()
        .setIfSequenceNumberEqual(incrementResult.getBlobSequenceNumber()),
    Duration.ofSeconds(30),
    Context.NONE
);
// Perform incremental copy (copies only pages changed since the given snapshot)
String sourcePageBlobUrl = "https://source.blob.core.windows.net/vhds/base.vhd";
String sourceSnapshot = "2023-12-01T10:30:00.0000000Z";
CopyStatusType copyStatus = pageBlobClient.copyIncremental(sourcePageBlobUrl, sourceSnapshot);
System.out.println("Incremental copy status: " + copyStatus);
// Incremental copy with options
PageBlobCopyIncrementalOptions copyOptions = new PageBlobCopyIncrementalOptions(
sourcePageBlobUrl, sourceSnapshot)
.setRequestConditions(new BlobRequestConditions()
.setIfNoneMatch("*"));
Response<CopyStatusType> copyResponse = pageBlobClient.copyIncrementalWithResponse(
copyOptions,
Duration.ofMinutes(10),
Context.NONE
);
// Poll copy progress until the service reports a terminal state.
// NOTE(review): Thread.sleep throws the checked InterruptedException, which
// this snippet leaves unhandled; real code should catch it and re-interrupt.
BlobProperties copyProperties = pageBlobClient.getProperties();
while (copyProperties.getCopyStatus() == CopyStatusType.PENDING) {
System.out.println("Copy progress: " + copyProperties.getCopyProgress());
Thread.sleep(5000); // Wait 5 seconds
copyProperties = pageBlobClient.getProperties();
}
System.out.println("Final copy status: " + copyProperties.getCopyStatus());import reactor.core.publisher.Mono;
import reactor.core.publisher.Flux;
// Async block blob operations — nothing executes until subscribe() is called.
BlockBlobAsyncClient asyncBlockBlobClient = asyncBlobClient.getBlockBlobAsyncClient();
// Async upload
Mono<BlockBlobItem> uploadMono = asyncBlockBlobClient.upload(
BinaryData.fromString("Async block blob content"),
true
);
uploadMono
.doOnSuccess(result -> System.out.println("Async upload: " + result.getETag()))
.doOnError(ex -> System.err.println("Upload failed: " + ex.getMessage()))
.subscribe();
// Async block staging and committing
// NOTE(review): this blockIds list is declared but never used below —
// commitBlockList is called with List.of(blockId) directly.
List<String> blockIds = new ArrayList<>();
String blockId = Base64.getEncoder().encodeToString("async-block".getBytes());
Mono<Void> stageBlockMono = asyncBlockBlobClient.stageBlock(blockId, BinaryData.fromString("Block content"));
Mono<BlockBlobItem> commitMono = stageBlockMono.then(
asyncBlockBlobClient.commitBlockList(List.of(blockId))
);
commitMono
.doOnSuccess(result -> System.out.println("Async blocks committed: " + result.getETag()))
.subscribe();// Async append blob operations
AppendBlobAsyncClient asyncAppendBlobClient = asyncBlobClient.getAppendBlobAsyncClient();
// Create and append in sequence — each then() waits for the previous step.
Mono<Void> appendSequence = asyncAppendBlobClient.createIfNotExists()
.then(asyncAppendBlobClient.appendBlock(BinaryData.fromString("First log entry\n")))
.then(asyncAppendBlobClient.appendBlock(BinaryData.fromString("Second log entry\n")))
.then(asyncAppendBlobClient.appendBlock(BinaryData.fromString("Third log entry\n")))
.then();
appendSequence
.doOnSuccess(v -> System.out.println("Async append sequence completed"))
.doOnError(ex -> System.err.println("Append sequence failed: " + ex.getMessage()))
.subscribe();
// Reactive logging pattern
Flux<String> logMessages = Flux.just(
"Application started",
"User authenticated",
"Data processed",
"Application shutdown"
);
// NOTE(review): flatMap runs appends concurrently, so entry ordering in the
// blob is not guaranteed — use concatMap if strict log order matters.
logMessages
.map(msg -> "[" + OffsetDateTime.now() + "] " + msg + "\n")
.map(BinaryData::fromString)
.flatMap(data -> asyncAppendBlobClient.appendBlock(data))
.doOnNext(result -> System.out.println("Log appended at: " + result.getBlobAppendOffset()))
.doOnComplete(() -> System.out.println("All log messages written"))
.subscribe();// Async page blob operations
PageBlobAsyncClient asyncPageBlobClient = asyncBlobClient.getPageBlobAsyncClient();
// Create and upload pages (size and ranges must be 512-byte aligned)
long pageSize = 1024 * 512L; // 512KB
PageRange range = new PageRange().setStart(0).setEnd(511);
byte[] pageData = new byte[512];
Mono<Void> pageSequence = asyncPageBlobClient.create(pageSize)
.then(asyncPageBlobClient.uploadPages(range, BinaryData.fromBytes(pageData)))
.then();
pageSequence
.doOnSuccess(v -> System.out.println("Async page blob created and page uploaded"))
.doOnError(ex -> System.err.println("Page operation failed: " + ex.getMessage()))
.subscribe();
// Parallel page uploads — safe because each Mono writes a distinct page range.
List<Mono<PageBlobItem>> pageUploads = new ArrayList<>();
for (int i = 0; i < 10; i++) {
PageRange pageRange = new PageRange()
.setStart(i * 512L)
.setEnd((i + 1) * 512L - 1);
byte[] data = new byte[512];
Arrays.fill(data, (byte) i);
pageUploads.add(asyncPageBlobClient.uploadPages(pageRange, BinaryData.fromBytes(data)));
}
// Mono.when completes when every upload completes (fails fast on first error).
Mono<Void> allPagesUploaded = Mono.when(pageUploads);
allPagesUploaded
.doOnSuccess(v -> System.out.println("All pages uploaded in parallel"))
.subscribe();// Optimize block sizes based on file size and network conditions
// Choose block size and parallelism for an upload from the total file size:
// <32MB → 1MB blocks / 4 concurrent; <512MB → 4MB / 8; otherwise 8MB / 16.
public ParallelTransferOptions getOptimalTransferOptions(long fileSize) {
    final long mb = 1024L * 1024L;
    long blockSize;
    int concurrency;
    if (fileSize < 32 * mb) {
        blockSize = 1 * mb;
        concurrency = 4;
    } else if (fileSize < 512 * mb) {
        blockSize = 4 * mb;
        concurrency = 8;
    } else {
        blockSize = 8 * mb;
        concurrency = 16;
    }
    return new ParallelTransferOptions()
        .setBlockSizeLong(blockSize)
        .setMaxConcurrency(concurrency);
}
// Usage for large file uploads
// NOTE(review): Files.size throws the checked IOException, which this snippet
// leaves unhandled.
long fileSize = Files.size(Paths.get("largefile.dat"));
ParallelTransferOptions optimizedOptions = getOptimalTransferOptions(fileSize);
BlobUploadFromFileOptions uploadOptions = new BlobUploadFromFileOptions("largefile.dat")
.setParallelTransferOptions(optimizedOptions);
blockBlobClient.uploadFromFileWithResponse(uploadOptions, Duration.ofMinutes(30), Context.NONE);Install with Tessl CLI
npx tessl i tessl/maven-com-azure--azure-storage-blob