# Configuration Options

This documentation covers the comprehensive configuration options available in the Azure Storage Blob Java SDK for fine-tuning client behavior, performance, and operation parameters.

## Upload Options

### BlobParallelUploadOptions

Configuration for parallel blob upload operations.

```java
import com.azure.storage.blob.options.BlobParallelUploadOptions;
import com.azure.storage.blob.models.*;
import com.azure.core.util.BinaryData;
import reactor.core.publisher.Flux;
import java.nio.ByteBuffer;

// Basic parallel upload configuration
ParallelTransferOptions transferOptions = new ParallelTransferOptions()
    .setBlockSizeLong(4 * 1024 * 1024L) // 4MB blocks
    .setMaxConcurrency(8)
    .setProgressListener(bytesTransferred ->
        System.out.println("Uploaded: " + bytesTransferred + " bytes"));

BlobParallelUploadOptions uploadOptions = new BlobParallelUploadOptions(
    BinaryData.fromString("Large content for parallel upload"))
    .setParallelTransferOptions(transferOptions)
    .setHeaders(new BlobHttpHeaders()
        .setContentType("text/plain")
        .setContentEncoding("utf-8")
        .setCacheControl("no-cache"))
    .setMetadata(Map.of(
        "upload-method", "parallel",
        "timestamp", OffsetDateTime.now().toString()))
    .setTags(Map.of(
        "environment", "production",
        "team", "backend"))
    .setTier(AccessTier.HOT)
    .setRequestConditions(new BlobRequestConditions()
        .setIfNoneMatch("*"));

// Upload with comprehensive options
Response<BlockBlobItem> uploadResponse = blobClient.uploadWithResponse(
    uploadOptions,
    Duration.ofMinutes(10),
    Context.NONE
);

// Upload from Flux (reactive streams)
Flux<ByteBuffer> dataFlux = Flux.range(1, 1000)
    .map(i -> ByteBuffer.wrap(("Data chunk " + i + "\n").getBytes()));

BlobParallelUploadOptions fluxOptions = new BlobParallelUploadOptions(dataFlux)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(1024 * 1024L) // 1MB blocks for streaming
        .setMaxConcurrency(4))
    .setHeaders(new BlobHttpHeaders().setContentType("text/plain"));

// Use with async client for reactive upload
BlobAsyncClient asyncClient = blobClient.getAsyncClient();
asyncClient.uploadWithResponse(fluxOptions)
    .doOnSuccess(response -> System.out.println("Reactive upload completed"))
    .subscribe();
```

### BlobUploadFromFileOptions

Configuration for uploading blobs from local files.

```java
import com.azure.storage.blob.options.BlobUploadFromFileOptions;

// Configure file upload with all options
BlobUploadFromFileOptions fileUploadOptions = new BlobUploadFromFileOptions("large-dataset.csv")
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(8 * 1024 * 1024L) // 8MB blocks for large files
        .setMaxConcurrency(6)
        .setMaxSingleUploadSizeLong(256 * 1024 * 1024L) // 256MB single upload threshold
        .setProgressListener(bytesTransferred -> {
            long totalSize = getTotalFileSize("large-dataset.csv");
            double percentage = (double) bytesTransferred / totalSize * 100;
            System.out.printf("Upload progress: %.1f%% (%d/%d bytes)%n",
                percentage, bytesTransferred, totalSize);
        }))
    .setHeaders(new BlobHttpHeaders()
        .setContentType("text/csv")
        .setContentDisposition("attachment; filename=dataset.csv")
        .setCacheControl("private, max-age=0"))
    .setMetadata(Map.of(
        "source-file", "large-dataset.csv",
        "file-size", String.valueOf(getTotalFileSize("large-dataset.csv")),
        "upload-date", OffsetDateTime.now().toString(),
        "checksum", calculateFileChecksum("large-dataset.csv")))
    .setTags(Map.of(
        "data-type", "csv",
        "department", "analytics",
        "retention-years", "7"))
    .setTier(AccessTier.HOT)
    .setRequestConditions(new BlobRequestConditions()
        .setIfNoneMatch("*") // Only upload if blob doesn't exist
        .setTagsConditions("\"data-type\" <> 'csv'")); // Prevent overwriting CSV files

// Execute upload with comprehensive error handling
try {
    Response<BlockBlobItem> response = blobClient.uploadFromFileWithResponse(
        fileUploadOptions,
        Duration.ofMinutes(30),
        Context.NONE
    );

    BlockBlobItem result = response.getValue();
    System.out.println("File upload completed:");
    System.out.println("ETag: " + result.getETag());
    System.out.println("Version ID: " + result.getVersionId());
    System.out.println("Upload time: " + result.getLastModified());

} catch (BlobStorageException ex) {
    handleUploadError(ex);
} catch (Exception ex) {
    System.err.println("Unexpected error during file upload: " + ex.getMessage());
}

// Helper methods
private long getTotalFileSize(String filePath) {
    try {
        return Files.size(Paths.get(filePath));
    } catch (Exception ex) {
        return 0;
    }
}

private String calculateFileChecksum(String filePath) {
    try {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        try (InputStream fis = Files.newInputStream(Paths.get(filePath))) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            while ((bytesRead = fis.read(buffer)) != -1) {
                md5.update(buffer, 0, bytesRead);
            }
        }
        return Base64.getEncoder().encodeToString(md5.digest());
    } catch (Exception ex) {
        return "unknown";
    }
}
```

## Download Options

### BlobDownloadToFileOptions

Configuration for downloading blobs to local files.

```java
import com.azure.storage.blob.options.BlobDownloadToFileOptions;
import com.azure.storage.blob.models.DownloadRetryOptions;
import java.nio.file.StandardOpenOption;

// Configure comprehensive file download
BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions("downloaded-file.dat")
    .setRange(new BlobRange(0)) // Download from beginning (entire file)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(4 * 1024 * 1024L) // 4MB download blocks
        .setMaxConcurrency(8)
        .setProgressListener(bytesTransferred -> {
            System.out.println("Downloaded: " + bytesTransferred + " bytes");
        }))
    .setDownloadRetryOptions(new DownloadRetryOptions()
        .setMaxRetryRequests(5))
    .setRequestConditions(new BlobRequestConditions()
        .setIfModifiedSince(OffsetDateTime.now().minusDays(1)))
    .setRetrieveContentMd5(true) // Validate content integrity
    .setOpenOptions(Set.of(
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING));

// Execute download with monitoring
try {
    Response<BlobProperties> downloadResponse = blobClient.downloadToFileWithResponse(
        downloadOptions,
        Duration.ofMinutes(15),
        Context.NONE
    );

    BlobProperties properties = downloadResponse.getValue();
    System.out.println("Download completed:");
    System.out.println("File size: " + properties.getBlobSize() + " bytes");
    System.out.println("Content type: " + properties.getContentType());
    System.out.println("Last modified: " + properties.getLastModified());

    // Verify content integrity if MD5 was requested
    byte[] contentMd5 = properties.getContentMd5();
    if (contentMd5 != null) {
        System.out.println("Content MD5: " + Base64.getEncoder().encodeToString(contentMd5));
        verifyDownloadedFileIntegrity("downloaded-file.dat", contentMd5);
    }

} catch (BlobStorageException ex) {
    handleDownloadError(ex);
}

// Partial download with range
BlobRange partialRange = new BlobRange(1024 * 1024, 5 * 1024 * 1024L); // Download 5MB starting at 1MB
BlobDownloadToFileOptions partialOptions = new BlobDownloadToFileOptions("partial-download.dat")
    .setRange(partialRange)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(512 * 1024L) // Smaller blocks for partial download
        .setMaxConcurrency(4));

Response<BlobProperties> partialResponse = blobClient.downloadToFileWithResponse(
    partialOptions,
    Duration.ofMinutes(5),
    Context.NONE
);

System.out.println("Partial download completed: " +
    Files.size(Paths.get("partial-download.dat")) + " bytes");
```

### BlobDownloadOptions

Configuration for in-memory blob downloads.

```java
import com.azure.storage.blob.options.BlobDownloadOptions;

// Configure in-memory download with range
BlobDownloadOptions downloadOptions = new BlobDownloadOptions()
    .setRange(new BlobRange(0, 1024 * 1024L)) // First 1MB
    .setRequestConditions(new BlobRequestConditions()
        .setIfUnmodifiedSince(OffsetDateTime.now()));

// Download to memory
BlobDownloadContentResponse contentResponse = blobClient.downloadContentWithResponse(
    downloadOptions,
    Duration.ofMinutes(2),
    Context.NONE
);

BinaryData content = contentResponse.getValue();
System.out.println("Downloaded " + content.getLength() + " bytes to memory");

// Process content based on type
String contentType = contentResponse.getHeaders().getValue("Content-Type");
if (contentType != null && contentType.startsWith("text/")) {
    String textContent = content.toString();
    System.out.println("Text content preview: " +
        textContent.substring(0, Math.min(100, textContent.length())));
} else {
    System.out.println("Binary content downloaded");
}
```

## Container Creation Options

### BlobContainerCreateOptions

Configuration for creating blob containers.

```java
import com.azure.storage.blob.options.BlobContainerCreateOptions;
import com.azure.storage.blob.models.BlobContainerEncryptionScope;

// Configure container creation with all options
BlobContainerCreateOptions createOptions = new BlobContainerCreateOptions()
    .setMetadata(Map.of(
        "environment", "production",
        "team", "backend",
        "created-by", "deployment-script",
        "cost-center", "12345",
        "retention-policy", "7-years"))
    .setPublicAccessType(PublicAccessType.BLOB) // Allow public blob access
    .setEncryptionScope(new BlobContainerEncryptionScope()
        .setDefaultEncryptionScope("containerencryption")
        .setPreventEncryptionScopeOverride(true));

// Create container with comprehensive configuration
try {
    Response<Void> createResponse = containerClient.createWithResponse(
        createOptions,
        Duration.ofMinutes(2),
        Context.NONE
    );

    System.out.println("Container created successfully:");
    System.out.println("Status code: " + createResponse.getStatusCode());
    System.out.println("Request ID: " + createResponse.getHeaders().getValue("x-ms-request-id"));

    // Verify configuration
    BlobContainerProperties properties = containerClient.getProperties();
    System.out.println("Public access: " + properties.getPublicAccess());
    System.out.println("Metadata count: " + properties.getMetadata().size());
    System.out.println("Default encryption scope: " + properties.getDefaultEncryptionScope());

} catch (BlobStorageException ex) {
    if (ex.getErrorCode() == BlobErrorCode.CONTAINER_ALREADY_EXISTS) {
        System.out.println("Container already exists");
    } else {
        throw ex;
    }
}

// Create container with minimal public access
BlobContainerCreateOptions secureOptions = new BlobContainerCreateOptions()
    .setMetadata(Map.of("security-level", "high"))
    .setPublicAccessType(null); // No public access

containerClient.createWithResponse(secureOptions, Duration.ofMinutes(1), Context.NONE);
```

## List and Query Options

### ListBlobsOptions

Configuration for listing blobs in containers.

```java
import com.azure.storage.blob.models.ListBlobsOptions;
import com.azure.storage.blob.models.BlobListDetails;

// Configure comprehensive blob listing
ListBlobsOptions listOptions = new ListBlobsOptions()
    .setPrefix("logs/2023/") // Filter by prefix
    .setMaxResultsPerPage(100) // Pagination
    .setDetails(new BlobListDetails()
        .setRetrieveDeletedBlobs(true) // Include soft-deleted blobs
        .setRetrieveMetadata(true) // Include blob metadata
        .setRetrieveTags(true) // Include blob tags
        .setRetrieveSnapshots(true) // Include blob snapshots
        .setRetrieveVersions(true) // Include blob versions
        .setRetrieveUncommittedBlobs(false) // Exclude uncommitted blocks
        .setRetrieveCopy(true) // Include copy information
        .setRetrieveImmutabilityPolicy(true) // Include immutability policies
        .setRetrieveLegalHolds(true)); // Include legal holds

// List blobs with comprehensive information
PagedIterable<BlobItem> blobs = containerClient.listBlobs(listOptions, Duration.ofMinutes(2));

for (BlobItem blob : blobs) {
    System.out.println("Blob: " + blob.getName());
    System.out.println("Size: " + blob.getProperties().getContentLength() + " bytes");
    System.out.println("Last modified: " + blob.getProperties().getLastModified());
    System.out.println("Is deleted: " + blob.isDeleted());
    System.out.println("Is current version: " + blob.isCurrentVersion());

    // Display metadata if retrieved
    if (blob.getMetadata() != null) {
        System.out.println("Metadata:");
        blob.getMetadata().forEach((key, value) ->
            System.out.println("  " + key + ": " + value));
    }

    // Display tags if retrieved
    if (blob.getTags() != null) {
        System.out.println("Tags:");
        blob.getTags().forEach((key, value) ->
            System.out.println("  " + key + ": " + value));
    }

    System.out.println("---");
}

// Hierarchical listing (folder-like structure)
ListBlobsOptions hierarchicalOptions = new ListBlobsOptions()
    .setPrefix("documents/")
    .setDetails(new BlobListDetails().setRetrieveMetadata(true));

PagedIterable<BlobItem> hierarchicalBlobs = containerClient.listBlobsByHierarchy(
    "/", // delimiter
    hierarchicalOptions,
    Duration.ofMinutes(1)
);

for (BlobItem item : hierarchicalBlobs) {
    if (item.isPrefix()) {
        System.out.println("Directory: " + item.getName());
    } else {
        System.out.println("File: " + item.getName());
    }
}
```

### FindBlobsOptions

Configuration for finding blobs by tags.

```java
import com.azure.storage.blob.options.FindBlobsOptions;

// Configure tag-based blob search
String tagQuery = "\"environment\" = 'production' AND \"team\" = 'backend' AND \"retention-years\" >= '5'";

FindBlobsOptions findOptions = new FindBlobsOptions(tagQuery)
    .setMaxResultsPerPage(50); // Limit results per page

// Find blobs matching tag criteria
PagedIterable<TaggedBlobItem> taggedBlobs = serviceClient.findBlobsByTags(
    findOptions,
    Duration.ofMinutes(2)
);

System.out.println("Blobs matching tag query: " + tagQuery);
for (TaggedBlobItem taggedBlob : taggedBlobs) {
    System.out.println("Blob: " + taggedBlob.getName());
    System.out.println("Container: " + taggedBlob.getContainerName());
    System.out.println("Tags: " + taggedBlob.getTags().toMap());
    System.out.println("---");
}

// Complex tag query examples
String[] complexQueries = {
    "\"cost-center\" = '12345'", // Simple equality
    "\"retention-years\" >= '7'", // Numeric comparison
    "\"environment\" = 'production' OR \"environment\" = 'staging'", // OR condition
    "\"department\" = 'finance' AND \"classification\" <> 'public'", // AND with not equal
    "\"created-date\" >= '2023-01-01' AND \"created-date\" < '2024-01-01'" // Date range
};

for (String query : complexQueries) {
    System.out.println("Query: " + query);
    PagedIterable<TaggedBlobItem> results = serviceClient.findBlobsByTags(
        new FindBlobsOptions(query).setMaxResultsPerPage(10),
        Duration.ofSeconds(30)
    );

    int count = 0;
    for (TaggedBlobItem item : results) {
        count++;
        if (count > 5) break; // Limit output
    }
    System.out.println("Found " + count + "+ matching blobs");
    System.out.println();
}
```

## Transfer and Performance Options

### ParallelTransferOptions

Configuration for optimizing parallel data transfer.

```java
import com.azure.storage.blob.models.ParallelTransferOptions;
import com.azure.storage.blob.ProgressReceiver;

// Create performance-optimized transfer options
public ParallelTransferOptions createOptimalTransferOptions(
        long fileSize,
        String networkCondition,
        boolean showProgress) {

    ParallelTransferOptions options = new ParallelTransferOptions();

    // Optimize based on file size
    if (fileSize < 16 * 1024 * 1024) { // < 16MB
        options.setBlockSizeLong(1 * 1024 * 1024L); // 1MB blocks
        options.setMaxConcurrency(2);
        options.setMaxSingleUploadSizeLong(fileSize); // Single upload for small files
    } else if (fileSize < 256 * 1024 * 1024) { // < 256MB
        options.setBlockSizeLong(4 * 1024 * 1024L); // 4MB blocks
        options.setMaxConcurrency(4);
        options.setMaxSingleUploadSizeLong(32 * 1024 * 1024L); // 32MB threshold
    } else if (fileSize < 1024 * 1024 * 1024) { // < 1GB
        options.setBlockSizeLong(8 * 1024 * 1024L); // 8MB blocks
        options.setMaxConcurrency(8);
        options.setMaxSingleUploadSizeLong(64 * 1024 * 1024L); // 64MB threshold
    } else { // >= 1GB
        options.setBlockSizeLong(16 * 1024 * 1024L); // 16MB blocks
        options.setMaxConcurrency(16);
        options.setMaxSingleUploadSizeLong(128 * 1024 * 1024L); // 128MB threshold
    }

    // Adjust for network conditions
    switch (networkCondition.toLowerCase()) {
        case "slow":
            options.setMaxConcurrency(Math.max(1, options.getMaxConcurrency() / 2));
            options.setBlockSizeLong(Math.max(1024 * 1024L, options.getBlockSizeLong() / 2));
            break;
        case "fast":
            options.setMaxConcurrency(Math.min(32, options.getMaxConcurrency() * 2));
            break;
        case "satellite":
            options.setMaxConcurrency(1); // High latency, low concurrency
            options.setBlockSizeLong(32 * 1024 * 1024L); // Larger blocks
            break;
    }

    // Add progress tracking if requested
    if (showProgress) {
        options.setProgressListener(new DetailedProgressReceiver(fileSize));
    }

    return options;
}

// Custom progress receiver with detailed reporting
class DetailedProgressReceiver implements ProgressReceiver {
    private final long totalSize;
    private long lastReported = 0;
    private final long startTime;

    public DetailedProgressReceiver(long totalSize) {
        this.totalSize = totalSize;
        this.startTime = System.currentTimeMillis();
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        // Report every 5% or every 10MB, whichever is less frequent
        long reportInterval = Math.min(totalSize / 20, 10 * 1024 * 1024L);

        if (bytesTransferred - lastReported >= reportInterval || bytesTransferred == totalSize) {
            double percentage = (double) bytesTransferred / totalSize * 100;
            long elapsedMs = System.currentTimeMillis() - startTime;
            double rate = elapsedMs > 0 ? (bytesTransferred / 1024.0 / 1024.0) / (elapsedMs / 1000.0) : 0;

            System.out.printf("Transfer progress: %.1f%% (%d/%d bytes) - %.2f MB/s%n",
                percentage, bytesTransferred, totalSize, rate);

            lastReported = bytesTransferred;
        }
    }
}

// Usage examples
long fileSize = 500 * 1024 * 1024L; // 500MB file

// Optimized for good network connection
ParallelTransferOptions fastOptions = createOptimalTransferOptions(fileSize, "fast", true);
BlobUploadFromFileOptions fastUpload = new BlobUploadFromFileOptions("large-file.dat")
    .setParallelTransferOptions(fastOptions);

// Optimized for slow/unreliable connection
ParallelTransferOptions slowOptions = createOptimalTransferOptions(fileSize, "slow", true);
BlobUploadFromFileOptions slowUpload = new BlobUploadFromFileOptions("large-file.dat")
    .setParallelTransferOptions(slowOptions);

// Optimized for satellite/high-latency connection
ParallelTransferOptions satelliteOptions = createOptimalTransferOptions(fileSize, "satellite", true);
BlobUploadFromFileOptions satelliteUpload = new BlobUploadFromFileOptions("large-file.dat")
    .setParallelTransferOptions(satelliteOptions);
```

### DownloadRetryOptions

Configuration for download retry behavior.

```java
import com.azure.storage.blob.models.DownloadRetryOptions;

// Configure robust download retry options
DownloadRetryOptions retryOptions = new DownloadRetryOptions()
    .setMaxRetryRequests(5); // Maximum number of retry attempts

// Use in download operations
BlobDownloadToFileOptions downloadOptions = new BlobDownloadToFileOptions("reliable-download.dat")
    .setDownloadRetryOptions(retryOptions)
    .setParallelTransferOptions(new ParallelTransferOptions()
        .setBlockSizeLong(2 * 1024 * 1024L) // Smaller blocks for reliability
        .setMaxConcurrency(4));

// Download with retry configuration
try {
    Response<BlobProperties> response = blobClient.downloadToFileWithResponse(
        downloadOptions,
        Duration.ofMinutes(20), // Longer timeout for retries
        Context.NONE
    );

    System.out.println("Download completed with retries if needed");

} catch (Exception ex) {
    System.err.println("Download failed after retries: " + ex.getMessage());
}

// Configure for different reliability needs
public DownloadRetryOptions createRetryOptions(String reliabilityLevel) {
    switch (reliabilityLevel.toLowerCase()) {
        case "high":
            return new DownloadRetryOptions().setMaxRetryRequests(10);
        case "medium":
            return new DownloadRetryOptions().setMaxRetryRequests(5);
        case "low":
            return new DownloadRetryOptions().setMaxRetryRequests(2);
        case "none":
            return new DownloadRetryOptions().setMaxRetryRequests(0);
        default:
            return new DownloadRetryOptions().setMaxRetryRequests(3);
    }
}
```

## Advanced Configuration Options

### CustomerProvidedKey

Configuration for customer-managed encryption keys.

```java
import com.azure.storage.blob.models.CustomerProvidedKey;
import com.azure.storage.blob.models.EncryptionAlgorithmType;

// Configure customer-provided encryption key
CustomerProvidedKey cpk = new CustomerProvidedKey("customer-encryption-key-32-bytes!")
    .setEncryptionAlgorithm(EncryptionAlgorithmType.AES256);

// Create client with customer-provided key
BlobServiceClient encryptedServiceClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .customerProvidedKey(cpk)
    .buildClient();

// All blobs created through this client will use the CPK
BlobClient encryptedBlobClient = encryptedServiceClient
    .getBlobContainerClient("encrypted-container")
    .getBlobClient("encrypted-file.txt");

// Upload encrypted content
encryptedBlobClient.upload(BinaryData.fromString("Sensitive encrypted content"), true);

// Download requires the same key
BinaryData decryptedContent = encryptedBlobClient.downloadContent();
System.out.println("Decrypted content: " + decryptedContent.toString());

// Key rotation example
CustomerProvidedKey newCpk = new CustomerProvidedKey("new-encryption-key-32-bytes-long!")
    .setEncryptionAlgorithm(EncryptionAlgorithmType.AES256);

// Create new client with rotated key
BlobServiceClient rotatedKeyClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .customerProvidedKey(newCpk)
    .buildClient();

// Copy blob with new encryption key
BlobClient sourceBlobClient = encryptedServiceClient
    .getBlobContainerClient("encrypted-container")
    .getBlobClient("encrypted-file.txt");

BlobClient targetBlobClient = rotatedKeyClient
    .getBlobContainerClient("encrypted-container")
    .getBlobClient("re-encrypted-file.txt");

String copyId = targetBlobClient.copyFromUrl(sourceBlobClient.getBlobUrl());
System.out.println("Key rotation copy started: " + copyId);
```

### Encryption Scope Configuration

```java
// Configure encryption scope at service client level
BlobServiceClient scopedServiceClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .encryptionScope("production-encryption-scope")
    .buildClient();

// All operations will use the configured encryption scope
BlobContainerClient scopedContainer = scopedServiceClient.getBlobContainerClient("scoped-container");
BlobClient scopedBlob = scopedContainer.getBlobClient("scoped-file.txt");

// Upload with automatic encryption scope application
scopedBlob.upload(BinaryData.fromString("Content encrypted with scope"), true);

// Container-level encryption scope configuration
BlobContainerEncryptionScope containerScope = new BlobContainerEncryptionScope()
    .setDefaultEncryptionScope("container-specific-scope")
    .setPreventEncryptionScopeOverride(true); // Enforce scope for all blobs

BlobContainerCreateOptions scopedContainerOptions = new BlobContainerCreateOptions()
    .setEncryptionScope(containerScope)
    .setMetadata(Map.of("encryption", "enforced"));

containerClient.createWithResponse(scopedContainerOptions, Duration.ofMinutes(1), Context.NONE);
```

## HTTP Client and Pipeline Configuration

### Advanced HTTP Configuration

```java
import com.azure.core.http.HttpClient;
import com.azure.core.http.netty.NettyAsyncHttpClientBuilder;
import com.azure.core.http.policy.*;
import com.azure.core.util.Configuration;
import java.time.Duration;

// Configure custom HTTP client with detailed settings
HttpClient customHttpClient = new NettyAsyncHttpClientBuilder()
    .connectionTimeout(Duration.ofSeconds(60))
    .responseTimeout(Duration.ofMinutes(5))
    .readTimeout(Duration.ofMinutes(2))
    .writeTimeout(Duration.ofMinutes(2))
    .maxIdleTime(Duration.ofSeconds(60))
    .maxLifeTime(Duration.ofMinutes(10))
    .maxChunkSize(8192)
    .maxInitialLineLength(4096)
    .maxHeaderSize(8192)
    .build();

// Configure retry policy with custom settings
RetryPolicy customRetryPolicy = new RetryPolicy("exponential", 5) {
    @Override
    public Duration calculateRetryDelay(int retryAttempt) {
        // Custom exponential backoff: 1s, 2s, 4s, 8s, 16s
        return Duration.ofSeconds((long) Math.pow(2, retryAttempt));
    }
};

// Configure request retry options
RequestRetryOptions retryOptions = new RequestRetryOptions(
    RetryPolicyType.EXPONENTIAL,
    5, // maxTries
    Duration.ofMinutes(2), // tryTimeout
    Duration.ofSeconds(1), // retryDelay
    Duration.ofSeconds(30), // maxRetryDelay
    null // secondaryHost
);

// Create service client with comprehensive configuration
BlobServiceClient configuredClient = new BlobServiceClientBuilder()
    .endpoint("https://myaccount.blob.core.windows.net")
    .credential(new DefaultAzureCredentialBuilder().build())
    .httpClient(customHttpClient)
    .retryPolicy(customRetryPolicy)
    .retryOptions(retryOptions)
    .addPolicy(new UserAgentPolicy("MyApplication/1.0.0"))
    .addPolicy(new RequestIdPolicy())
    .configuration(Configuration.getGlobalConfiguration())
    .serviceVersion(BlobServiceVersion.getLatest())
    .buildClient();

// Custom logging policy
HttpLogOptions logOptions = new HttpLogOptions()
    .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)
    .addAllowedHeaderName("x-ms-request-id")
    .addAllowedHeaderName("x-ms-version")
    .addAllowedQueryParamName("timeout");

BlobServiceClient loggingClient = new BlobServiceClientBuilder()
    .connectionString(connectionString)
    .httpLogOptions(logOptions)
    .buildClient();
```

## Context and Metadata Configuration

### Request Context Configuration

```java
import com.azure.core.util.Context;
import com.azure.core.util.tracing.Tracer;

// Create context with custom values
Context customContext = Context.NONE
    .addData("operation-id", UUID.randomUUID().toString())
    .addData("user-id", "user-12345")
    .addData("correlation-id", "corr-" + System.currentTimeMillis());

// Use context in operations for tracing and correlation
Response<BlockBlobItem> contextualUpload = blobClient.uploadWithResponse(
    new BlobParallelUploadOptions(BinaryData.fromString("Content with context"))
        .setMetadata(Map.of("operation-id", customContext.getData("operation-id").orElse("unknown").toString())),
    Duration.ofMinutes(5),
    customContext
);

// Extract request ID from response for logging
String requestId = contextualUpload.getHeaders().getValue("x-ms-request-id");
System.out.println("Upload completed with request ID: " + requestId);

// Context with timeout override
Context timeoutContext = Context.NONE.addData("timeout", Duration.ofMinutes(10));

// Context with custom headers
Context headerContext = Context.NONE.addData("custom-header", "custom-value");
```

## Performance Monitoring Configuration

### Advanced Progress Tracking

```java
// Comprehensive progress tracking with metrics
public class AdvancedProgressTracker implements ProgressReceiver {
    private final String operationName;
    private final long totalSize;
    private final long startTime;
    private long lastUpdate;
    private long bytesAtLastUpdate;
    private final List<Double> speedSamples = new ArrayList<>();

    public AdvancedProgressTracker(String operationName, long totalSize) {
        this.operationName = operationName;
        this.totalSize = totalSize;
        this.startTime = System.currentTimeMillis();
        this.lastUpdate = startTime;
    }

    @Override
    public void reportProgress(long bytesTransferred) {
        long currentTime = System.currentTimeMillis();

        if (currentTime - lastUpdate >= 1000 || bytesTransferred == totalSize) { // Update every second
            double elapsedSeconds = (currentTime - startTime) / 1000.0;
            double overallSpeed = elapsedSeconds > 0 ? (bytesTransferred / 1024.0 / 1024.0) / elapsedSeconds : 0;

            // Calculate instantaneous speed
            double instantSpeed = 0;
            if (lastUpdate > 0) {
                double intervalSeconds = (currentTime - lastUpdate) / 1000.0;
                long intervalBytes = bytesTransferred - bytesAtLastUpdate;
                instantSpeed = intervalSeconds > 0 ? (intervalBytes / 1024.0 / 1024.0) / intervalSeconds : 0;
                speedSamples.add(instantSpeed);
            }

            double percentage = (double) bytesTransferred / totalSize * 100;

            // Calculate ETA
            String eta = "unknown";
            if (overallSpeed > 0) {
                long remainingBytes = totalSize - bytesTransferred;
                long etaSeconds = (long) (remainingBytes / 1024.0 / 1024.0 / overallSpeed);
                eta = formatDuration(etaSeconds);
            }

            System.out.printf("[%s] %.1f%% (%s/%s) - Speed: %.2f MB/s (avg: %.2f MB/s) - ETA: %s%n",
                operationName,
                percentage,
                formatBytes(bytesTransferred),
                formatBytes(totalSize),
                instantSpeed,
                overallSpeed,
                eta
            );

            lastUpdate = currentTime;
            bytesAtLastUpdate = bytesTransferred;
        }
    }

    private String formatBytes(long bytes) {
        if (bytes < 1024) return bytes + " B";
        if (bytes < 1024 * 1024) return String.format("%.1f KB", bytes / 1024.0);
        if (bytes < 1024 * 1024 * 1024) return String.format("%.1f MB", bytes / 1024.0 / 1024.0);
        return String.format("%.2f GB", bytes / 1024.0 / 1024.0 / 1024.0);
    }

    private String formatDuration(long seconds) {
        if (seconds < 60) return seconds + "s";
        if (seconds < 3600) return String.format("%dm %ds", seconds / 60, seconds % 60);
        return String.format("%dh %dm", seconds / 3600, (seconds % 3600) / 60);
    }

    public void printSummary() {
        long totalTime = System.currentTimeMillis() - startTime;
        double avgSpeed = speedSamples.stream().mapToDouble(Double::doubleValue).average().orElse(0.0);
        double maxSpeed = speedSamples.stream().mapToDouble(Double::doubleValue).max().orElse(0.0);
        double minSpeed = speedSamples.stream().mapToDouble(Double::doubleValue).min().orElse(0.0);

        System.out.println("\n" + operationName + " Summary:");
        System.out.printf("Total time: %s%n", formatDuration(totalTime / 1000));
        System.out.printf("Average speed: %.2f MB/s%n", avgSpeed);
        System.out.printf("Max speed: %.2f MB/s%n", maxSpeed);
        System.out.printf("Min speed: %.2f MB/s%n", minSpeed);
        System.out.printf("Total transferred: %s%n", formatBytes(totalSize));
    }
}

// Usage with comprehensive monitoring
long fileSize = Files.size(Paths.get("large-file.bin"));
AdvancedProgressTracker tracker = new AdvancedProgressTracker("Large File Upload", fileSize);

ParallelTransferOptions monitoredOptions = new ParallelTransferOptions()
    .setProgressListener(tracker)
    .setBlockSizeLong(8 * 1024 * 1024L)
    .setMaxConcurrency(6);

BlobUploadFromFileOptions uploadOptions = new BlobUploadFromFileOptions("large-file.bin")
    .setParallelTransferOptions(monitoredOptions);

try {
    blobClient.uploadFromFileWithResponse(uploadOptions, Duration.ofMinutes(30), Context.NONE);
    tracker.printSummary();
} catch (Exception ex) {
    System.err.println("Upload failed: " + ex.getMessage());
    tracker.printSummary();
}
```

## Related Documentation

- [← Back to Overview](index.md)
- [← Model Classes & Enums](models.md)
- [Security & Authentication →](security.md)
- [Streaming & Advanced I/O →](streaming.md)