JNI bindings for Zstd native library that provides fast and high compression lossless algorithm for Java and all JVM languages.
—
High-performance streaming API for direct ByteBuffers with minimal memory copying. These classes provide the highest performance option for applications that can work with direct ByteBuffers and implement custom buffer management strategies.
Stream-based compression using direct ByteBuffers for maximum performance.
/**
* Creates a compressing stream for direct ByteBuffers (protected constructor)
* @param target initial target buffer for compressed output (must be direct)
* @param level compression level (1-22, higher = better compression)
* @throws IOException if initialization fails
*/
protected ZstdDirectBufferCompressingStream(ByteBuffer target, int level) throws IOException;
/**
* Gets recommended output buffer size for optimal performance
* @return recommended buffer size in bytes
*/
public static int recommendedOutputBufferSize();
/**
* Compresses data from source buffer
* @param source source buffer containing data to compress (must be direct)
* @throws IOException if compression fails
*/
public void compress(ByteBuffer source) throws IOException;
/**
* Flushes any pending compressed data
* @throws IOException if flush fails
*/
public void flush() throws IOException;
/**
* Closes the stream and finishes compression
* @throws IOException if close fails
*/
public void close() throws IOException;
/**
* Buffer management callback - override to handle buffer flushing
* @param toFlush buffer that needs to be flushed (flip() first to read data)
* @return buffer to continue using (typically same buffer after clear())
* @throws IOException if buffer handling fails
*/
protected ByteBuffer flushBuffer(ByteBuffer toFlush) throws IOException;
Usage Examples:
import com.github.luben.zstd.ZstdDirectBufferCompressingStream;
import java.nio.ByteBuffer;
import java.io.*;
// Extend the class to implement buffer management
class MyCompressingStream extends ZstdDirectBufferCompressingStream {
    private final OutputStream output;

    /**
     * Wraps an OutputStream as the sink for compressed data.
     *
     * @param output destination for the compressed bytes
     * @param level  zstd compression level
     * @throws IOException if stream initialization fails
     */
    public MyCompressingStream(OutputStream output, int level) throws IOException {
        super(ByteBuffer.allocateDirect(recommendedOutputBufferSize()), level);
        this.output = output;
    }

    /**
     * Drains the compressed bytes from {@code toFlush} into the wrapped
     * OutputStream and hands the same buffer back for reuse.
     */
    @Override
    protected ByteBuffer flushBuffer(ByteBuffer toFlush) throws IOException {
        toFlush.flip(); // switch the buffer to read mode
        byte[] chunk = new byte[toFlush.remaining()];
        toFlush.get(chunk);
        output.write(chunk);
        toFlush.clear(); // switch back to write mode
        return toFlush;  // keep using the same buffer
    }
}
// Use the custom compressing stream
try (MyCompressingStream compressor = new MyCompressingStream(outputStream, 6)) {
    ByteBuffer sourceData = ByteBuffer.allocateDirect(8192);
    // Specify the charset explicitly: the no-arg getBytes() uses the
    // platform-default charset and is not portable.
    sourceData.put("Data to compress".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    sourceData.flip(); // make the written bytes readable by compress()
    compressor.compress(sourceData);
    compressor.flush();
}
Stream-based decompression using direct ByteBuffers for maximum performance.
/**
* Creates a decompressing stream for direct ByteBuffers
* @param source initial source buffer containing compressed data (must be direct)
*/
public ZstdDirectBufferDecompressingStream(ByteBuffer source);
/**
* Gets recommended target buffer size for optimal performance
* @return recommended buffer size in bytes
*/
public static int recommendedTargetBufferSize();
/**
* Checks if more data is available for decompression
* @return true if more data can be read
*/
public boolean hasRemaining();
/**
* Reads decompressed data into target buffer
* @param target target buffer for decompressed data (must be direct)
* @return number of bytes written to target buffer
* @throws IOException if decompression fails
*/
public int read(ByteBuffer target) throws IOException;
/**
* Closes the stream and releases resources
* @throws IOException if close fails
*/
public void close() throws IOException;
/**
* Buffer management callback - override to refill source buffer
* @param toRefill current source buffer (may need more data)
* @return buffer to continue using (typically same buffer refilled and flipped)
*/
protected ByteBuffer refill(ByteBuffer toRefill);
Usage Examples:
import com.github.luben.zstd.ZstdDirectBufferDecompressingStream;
import java.nio.ByteBuffer;
import java.io.*;
// Extend the class to implement buffer management
class MyDecompressingStream extends ZstdDirectBufferDecompressingStream {
    private final InputStream input;

    /**
     * Creates a decompressing stream that pulls compressed bytes from
     * {@code input} on demand.
     *
     * @param input stream of zstd-compressed data
     * @throws IOException if the initial read from {@code input} fails
     */
    public MyDecompressingStream(InputStream input) throws IOException {
        super(readInitialData(input));
        this.input = input;
    }

    /**
     * Reads the first chunk of compressed data into a direct buffer,
     * returned flipped and ready for the decompressor to consume.
     */
    private static ByteBuffer readInitialData(InputStream input) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocateDirect(8192);
        byte[] temp = new byte[8192];
        int bytesRead = input.read(temp);
        if (bytesRead > 0) {
            buffer.put(temp, 0, bytesRead);
        }
        buffer.flip();
        return buffer;
    }

    /**
     * Refills the source buffer with more compressed data. Unread bytes are
     * preserved via compact(); the buffer is returned flipped for reading.
     * Since refill() does not declare IOException, read failures are rethrown
     * as UncheckedIOException (preserving the cause's type) rather than a
     * bare RuntimeException.
     */
    @Override
    protected ByteBuffer refill(ByteBuffer toRefill) {
        try {
            toRefill.compact(); // keep any unread compressed bytes
            byte[] temp = new byte[toRefill.remaining()];
            int bytesRead = input.read(temp);
            if (bytesRead > 0) {
                toRefill.put(temp, 0, bytesRead);
            }
            toRefill.flip(); // ready for the decompressor to read
            return toRefill;
        } catch (IOException e) {
            // Callers that need the original IOException can unwrap it.
            throw new UncheckedIOException(e);
        }
    }
}
// Use the custom decompressing stream
try (MyDecompressingStream decompressor = new MyDecompressingStream(inputStream)) {
    int bufSize = ZstdDirectBufferDecompressingStream.recommendedTargetBufferSize();
    ByteBuffer out = ByteBuffer.allocateDirect(bufSize);
    while (decompressor.hasRemaining()) {
        int produced = decompressor.read(out);
        if (produced > 0) {
            out.flip(); // expose the decompressed bytes
            byte[] decoded = new byte[out.remaining()];
            out.get(decoded);
            processData(decoded);
            out.clear(); // recycle the buffer for the next read
        }
    }
}
Multiple Buffer Strategy:
class MultiBufferCompressor extends ZstdDirectBufferCompressingStream {
    // ConcurrentLinkedQueue: the pool is touched by the compressing thread
    // (flushBuffer) and by the async writer threads (writeBufferAsync), so a
    // plain LinkedList would be a data race.
    private final Queue<ByteBuffer> bufferPool;
    private final OutputStream output;

    public MultiBufferCompressor(OutputStream output, int level) throws IOException {
        super(ByteBuffer.allocateDirect(recommendedOutputBufferSize()), level);
        this.output = output;
        this.bufferPool = new java.util.concurrent.ConcurrentLinkedQueue<>();
        // Pre-allocate buffer pool
        for (int i = 0; i < 4; i++) {
            bufferPool.offer(ByteBuffer.allocateDirect(recommendedOutputBufferSize()));
        }
    }

    /**
     * Hands the filled buffer off for asynchronous writing and returns a
     * fresh buffer (from the pool, or newly allocated) for the compressor.
     */
    @Override
    protected ByteBuffer flushBuffer(ByteBuffer toFlush) throws IOException {
        // Asynchronously write buffer to output
        writeBufferAsync(toFlush);
        // Get next buffer from pool
        ByteBuffer nextBuffer = bufferPool.poll();
        if (nextBuffer == null) {
            nextBuffer = ByteBuffer.allocateDirect(recommendedOutputBufferSize());
        }
        return nextBuffer;
    }

    // NOTE(review): assumes an `executor` (ExecutorService) field and a
    // `writeBufferToStream` helper are defined elsewhere in the application.
    private void writeBufferAsync(ByteBuffer buffer) {
        executor.submit(() -> {
            try {
                buffer.flip();
                writeBufferToStream(buffer, output);
                buffer.clear();
                bufferPool.offer(buffer); // Return to pool
            } catch (IOException e) {
                // Don't silently swallow the failure: surface it through the
                // task's Future (real code should keep and check these futures).
                throw new java.io.UncheckedIOException(e);
            }
        });
    }
}
Memory-Mapped File Integration:
class MMapDecompressor extends ZstdDirectBufferDecompressingStream {
    /**
     * Maps the whole compressed file and feeds it to the stream as the
     * source buffer. A MappedByteBuffer is a direct ByteBuffer, so the
     * decompressor consumes it in place with no copying.
     *
     * (The original example called an undefined {@code createInitialBuffer}
     * helper and its refill() used clear(), which would discard any unread
     * compressed bytes; both are fixed here.)
     *
     * @param compressedFile path to a zstd-compressed file
     * @throws IOException if the file cannot be opened or mapped
     */
    public MMapDecompressor(Path compressedFile) throws IOException {
        super(mapWholeFile(compressedFile));
    }

    // Maps the entire file read-only; the mapping remains valid after the
    // channel is closed.
    private static ByteBuffer mapWholeFile(Path compressedFile) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(compressedFile.toFile(), "r");
             FileChannel channel = raf.getChannel()) {
            return channel.map(FileChannel.MapMode.READ_ONLY, 0, channel.size());
        }
    }

    /**
     * The full file is already available in the source buffer, so there is
     * nothing to refill; returning the buffer unchanged (with no bytes
     * remaining) signals end of input.
     */
    @Override
    protected ByteBuffer refill(ByteBuffer toRefill) {
        return toRefill;
    }
}
// Use recommended buffer sizes for optimal performance
int outputSize = ZstdDirectBufferCompressingStream.recommendedOutputBufferSize();
int inputSize = ZstdDirectBufferDecompressingStream.recommendedTargetBufferSize();
ByteBuffer outputBuffer = ByteBuffer.allocateDirect(outputSize);
ByteBuffer inputBuffer = ByteBuffer.allocateDirect(inputSize);
Direct buffer streaming methods throw IOException on errors:
try (MyCompressingStream compressor = new MyCompressingStream(output, 6)) {
    compressor.compress(sourceBuffer);
} catch (IOException e) {
    // getMessage() may be null; guard before matching on it.
    String message = e.getMessage();
    if (message != null && message.contains("Target buffer has no more space")) {
        // Buffer management issue
    } else {
        // Other compression error
    }
}
Common error conditions:
Install with Tessl CLI
npx tessl i tessl/maven-com-github-luben--zstd-jni