FoundationDB Java client examples: handling edge cases, error conditions, and advanced usage patterns.
// Tuples tolerate null elements: object accessors surface them as null,
// while primitive accessors (getLong) throw because a long cannot be null.
Tuple withNull = Tuple.from("key", null, "value");
String maybeValue = withNull.getString(1); // null, no exception
// long num = withNull.getLong(1); // would throw IllegalArgumentException
// A get() on a key that was never written completes with null rather than
// throwing, so absence must be checked explicitly by the caller.
byte[] stored = db.run(tr -> tr.get("nonexistent".getBytes()).join());
if (stored == null) {
// Handle missing key
}// Empty range (begin >= end)
// clear(begin, end) with begin >= end denotes an empty range; the call
// succeeds and simply removes nothing.
db.run(tr -> {
tr.clear("z".getBytes(), "a".getBytes()); // inverted bounds -> no-op
return null;
});
// An inverted range yields an empty iterator: the loop body never runs
// and the read returns an empty list.
List<KeyValue> results = db.read(tr -> {
List<KeyValue> collected = new ArrayList<>();
for (KeyValue entry : tr.getRange("z".getBytes(), "a".getBytes())) {
collected.add(entry); // unreachable: the range is empty
}
return collected;
});// Check transaction size before commit
// Transactions are capped at 10 MB of affected data; probe the running
// size before committing so oversized work can be split up instead of
// failing server-side.
try (Transaction tr = db.createTransaction()) {
for (int i = 0; i < 10000; i++) {
tr.set(("key" + i).getBytes(), new byte[1000]);
}
long approxBytes = tr.getApproximateSize().join();
// Stay comfortably below the 10 MB hard limit.
if (approxBytes > 9_000_000) {
// Split into multiple transactions
throw new IllegalStateException("Transaction too large");
}
tr.commit().join();
}// Split large range for processing
// Ask the cluster for keys that split [data:, data;) into ~1MB pieces,
// then walk the pieces pairwise so the whole range (head through tail)
// is covered exactly once.
db.read(tr -> {
byte[] rangeBegin = "data:".getBytes();
byte[] rangeEnd = "data;".getBytes();
KeyArrayResult splits = tr.getRangeSplitPoints(rangeBegin, rangeEnd, 1_000_000).join();
byte[] chunkStart = rangeBegin;
for (byte[] boundary : splits.getKeys()) {
processChunk(tr, chunkStart, boundary);
chunkStart = boundary;
}
processChunk(tr, chunkStart, rangeEnd); // tail chunk up to the range end
return null;
});// Manual retry with onError
// Hand-rolled retry loop equivalent to what db.run() does internally:
// re-issue the work until commit succeeds, delegating retry decisions to onError.
Transaction tr = db.createTransaction();
try {
while (true) {
try {
tr.set("key".getBytes(), "value".getBytes());
tr.commit().join();
break; // Success
} catch (Throwable e) {
// onError rethrows non-retryable errors (ending the loop); for retryable
// ones it backs off and completes with a reset transaction for the next attempt.
tr = tr.onError(e).join(); // Returns reset transaction
}
}
} finally {
// Always release native transaction resources, success or failure.
tr.close();
}
// Check error codes after run() gives up. Note: the Java binding spells
// the retry predicate isRetriable(), not isRetryable().
try {
db.run(tr -> {
tr.set("key".getBytes(), "value".getBytes());
return null;
});
} catch (FDBException e) {
if (e.getCode() == 1007) { // transaction_too_old
// Handle version too old
} else if (e.getCode() == 1020) { // not_committed
// Handle conflict
} else if (!e.isRetriable()) { // Fixed: API method is isRetriable()
// Non-retryable error
throw e;
}
}// Use snapshot reads to avoid conflicts
// Snapshot reads bypass conflict-range tracking: the statistics read below
// does not add a read-conflict range, so concurrent writers to that key
// cannot force this transaction to retry.
db.run(tr -> {
tr.set("critical".getBytes(), "data".getBytes()); // normal, conflict-checked write
ReadTransaction noConflict = tr.snapshot();
byte[] stats = noConflict.get("statistics".getBytes()).join();
// Won't conflict even if statistics key is updated
return null;
});// Only one incomplete versionstamp per key/value
// packWithVersionstamp accepts exactly one incomplete versionstamp per tuple.
Tuple key = Tuple.from("log", Versionstamp.incomplete());
byte[] packed = key.packWithVersionstamp(); // OK: single incomplete stamp
// Two incomplete versionstamps in one tuple cannot be packed:
Tuple invalid = Tuple.from(Versionstamp.incomplete(), Versionstamp.incomplete());
// invalid.packWithVersionstamp() would throw IllegalArgumentException
// The user-version argument orders stamps created inside one transaction:
for (int i = 0; i < 10; i++) {
Versionstamp vs = Versionstamp.incomplete(i);
Tuple eventKey = Tuple.from("events", vs);
// Each gets same transaction version but different user version
}// Opening non-existent tenant (no error until first use)
// openTenant is lazy: an unknown tenant only surfaces tenant_not_found
// when the first operation runs, not at open time.
Tenant tenant = db.openTenant("nonexistent".getBytes());
try {
tenant.run(tr -> {
tr.set("key".getBytes(), "value".getBytes());
return null;
});
} catch (FDBException e) {
// tenant_not_found error surfaces here, on first use
}
// A tenant must contain no data before it can be deleted, so wipe its
// entire keyspace first, then issue the management deletion.
Tenant tenant = db.openTenant("to_delete".getBytes());
tenant.run(tr -> {
tr.clear(new byte[0], new byte[]{(byte)0xFF}); // whole tenant key range
return null;
});
TenantManagement.deleteTenant(db, "to_delete".getBytes()).join();// Each thread needs own transaction
// Transactions are not thread-safe; each concurrent task gets its own via
// db.run(). Fixed: the pool is now shut down in a finally block — the
// original leaked it, and its non-daemon threads keep the JVM alive.
ExecutorService executor = Executors.newFixedThreadPool(10);
try {
List<CompletableFuture<Void>> tasks = new ArrayList<>();
for (int i = 0; i < 10; i++) {
final int threadId = i;
tasks.add(CompletableFuture.runAsync(() -> {
// Each task creates its own transaction
db.run(tr -> {
byte[] key = ("key:" + threadId).getBytes();
tr.set(key, ("value:" + threadId).getBytes());
return null;
});
}, executor));
}
CompletableFuture.allOf(tasks.toArray(new CompletableFuture[0])).join();
} finally {
executor.shutdown();
}// Maximum 10,000 watches per database (configurable)
db.options().setMaxWatches(20000); // Raise the default 10,000-watch cap
// Register a watch. db.run() commits the transaction itself, which is what
// activates the watch. Fixed: the original also called tr.commit().join()
// inside the lambda, making run() commit a second time and fail.
CompletableFuture<Void> watch = db.run(tr -> tr.watch("key".getBytes()));
watch.orTimeout(30, TimeUnit.SECONDS)
.thenRun(() -> System.out.println("Changed"))
.exceptionally(ex -> {
// Dependent stages wrap the TimeoutException in a CompletionException.
if (ex.getCause() instanceof TimeoutException) {
System.out.println("Watch timed out");
}
return null;
});// Validate key size before use
// Enforce FDB's hard limits client-side before writing: keys are capped at
// 10,000 bytes and values at 100,000 bytes; oversized writes fail the commit.
byte[] key = createKey(userId, path);
if (key.length > 10_000) {
throw new IllegalArgumentException("Key too large (max 10KB)");
}
byte[] value = createValue(data);
if (value.length > 100_000) {
throw new IllegalArgumentException("Value too large (max 100KB)");
}
db.run(tr -> {
tr.set(key, value);
return null;
});// Access system keys requires special option
// Keys at and above 0xFF are reserved for the system; reading them
// requires opting in on each transaction.
db.run(tr -> {
tr.options().setAccessSystemKeys();
byte[] systemKey = new byte[]{(byte)0xFF, (byte)0xFF, '/', 'c', 'o', 'n', 'f', 'i', 'g'};
byte[] config = tr.get(systemKey).join();
return null;
});// Blobbify range (moves to blob storage)
// Migrate a key range into blob storage, then check it back.
db.blobbifyRange(
"archive:2020".getBytes(),
"archive:2021".getBytes(),
db.getExecutor()
).thenAccept(ok -> {
if (ok) {
System.out.println("Range blobbified");
}
});
// Read the blob range back at the latest version (-1) to confirm integrity.
db.verifyBlobRange(
"archive:2020".getBytes(),
"archive:2021".getBytes(),
-1,
db.getExecutor()
).thenAccept(version -> System.out.println("Verified at version: " + version));// Get boundary keys for parallel processing
try (CloseableAsyncIterator<byte[]> boundaries =
LocalityUtil.getBoundaryKeys(
db,
"data:".getBytes(),
"data;".getBytes()
)) {
List<byte[]> splits = new ArrayList<>();
while (boundaries.hasNext()) {
splits.add(boundaries.next());
}
// Process each range in parallel
for (int i = 0; i < splits.size() - 1; i++) {
processRange(splits.get(i), splits.get(i + 1));
}
}