Runtime classes required by a task manager for execution of table programs in Apache Flink's Blink planner.

```
npx @tessl/cli install tessl/maven-org-apache-flink--flink-table-runtime-blink_2-11@1.13.0
```

This module provides the core execution infrastructure for Flink's table processing ecosystem, enabling high-performance stream and batch processing of structured data with SQL and Table API queries.
Add the dependency to your Maven build:

```xml
<dependency>
    <groupId>org.apache.flink</groupId>
    <artifactId>flink-table-runtime-blink_2.11</artifactId>
    <version>1.13.6</version>
</dependency>
```

The module spans several package areas:

```java
// Data structures and vectorization
import org.apache.flink.table.data.*;
import org.apache.flink.table.data.conversion.*;
import org.apache.flink.table.data.vector.*;
// File system integration
import org.apache.flink.table.filesystem.*;
// Runtime operators and code generation
import org.apache.flink.table.runtime.generated.*;
import org.apache.flink.table.runtime.operators.*;
// Type system
import org.apache.flink.table.runtime.types.*;
import org.apache.flink.table.runtime.typeutils.*;
```

This module is primarily used internally by Flink's table runtime but exposes key interfaces for extending functionality:
```java
// Using the file system table factory
import org.apache.flink.table.filesystem.FileSystemTableFactory;

// Create a file system table source/sink factory; in practice it is
// discovered and configured through the table environment
FileSystemTableFactory factory = new FileSystemTableFactory();
```
```java
// Data conversion example
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.conversion.DataStructureConverter;
import org.apache.flink.table.data.conversion.DataStructureConverters;

// Convert between internal (RowData) and external (Row) structures;
// `dataType` is the declared DataType of the rows being converted
DataStructureConverter<Object, Object> converter =
        DataStructureConverters.getConverter(dataType);
converter.open(Thread.currentThread().getContextClassLoader());

Object externalRow = converter.toExternal(internalRowData);
Object internalRow = converter.toInternal(externalRow);
```
```java
// Vectorized data processing
import org.apache.flink.table.data.vector.ColumnVector;
import org.apache.flink.table.data.vector.VectorizedColumnBatch;

ColumnVector[] vectors = createColumnVectors(); // build appropriate column vectors (sketch below)
VectorizedColumnBatch batch = new VectorizedColumnBatch(vectors);
// Process data in columnar format for better performance
```
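A minimal sketch of what a `createColumnVectors()` helper might look like, using the heap-backed writable vectors shipped in this module; the helper itself is hypothetical and the sizes are arbitrary:

```java
import org.apache.flink.table.data.vector.ColumnVector;
import org.apache.flink.table.data.vector.heap.HeapIntVector;

// Hypothetical helper: one INT column with 1024 slots, filled with sample values
static ColumnVector[] createColumnVectors() {
    HeapIntVector intVector = new HeapIntVector(1024);
    for (int i = 0; i < 1024; i++) {
        intVector.setInt(i, i * 2);
    }
    return new ColumnVector[] {intVector};
}
```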
The module is organized around several key components.

Core data structures for efficient columnar processing, type conversion between internal and external formats, and vectorized operations for high-performance analytics:
```java
// Core interfaces for vectorized data access
interface ColumnVector {
    boolean isNullAt(int rowId);
    // Type-specific access methods implemented by subclasses
}

// Main vectorized batch container
class VectorizedColumnBatch {
    VectorizedColumnBatch(ColumnVector[] vectors);
    ColumnVector[] columns;
    int numRows;
}

// Key converter interface
interface DataStructureConverter<I, E> {
    E toExternal(I internal);
    I toInternal(E external);
}
```

Complete file system table source and sink implementation with support for partitioning, streaming writes, file compaction, and integration with various storage systems:
```java
// Primary factory for file system tables
class FileSystemTableFactory
        implements DynamicTableSourceFactory, DynamicTableSinkFactory {
    DynamicTableSource createDynamicTableSource(Context context);
    DynamicTableSink createDynamicTableSink(Context context);
}

// Core partition management interfaces
interface PartitionWriter<T> {
    void write(T record) throws Exception;
    void close() throws Exception;
}

interface PartitionCommitPolicy {
    void commit(Context context) throws Exception;
}
```
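For orientation, a sketch of a custom commit policy modeled on the built-in success-file policy; it assumes the `Context#partitionPath()` accessor and elides retries and error handling:

```java
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.table.filesystem.PartitionCommitPolicy;

// Marks a partition as committed by writing an empty marker file
// into the partition directory
public class MarkerFileCommitPolicy implements PartitionCommitPolicy {

    @Override
    public void commit(Context context) throws Exception {
        Path marker = new Path(context.partitionPath(), "_COMMITTED");
        FileSystem fs = marker.getFileSystem();
        // Overwrite any stale marker left by a previous attempt
        fs.create(marker, FileSystem.WriteMode.OVERWRITE).close();
    }
}
```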
Comprehensive set of operators for joins, aggregations, window operations, sorting, ranking, and other table processing operations, optimized for both streaming and batch execution:

```java
// Main factory for code-generated operators
class CodeGenOperatorFactory<OUT> extends AbstractStreamOperatorFactory<OUT> {
    CodeGenOperatorFactory(GeneratedClass<? extends StreamOperator<OUT>> generatedClass);
}

// Window operator builder
class WindowOperatorBuilder {
    WindowOperatorBuilder withWindowAssigner(WindowAssigner<?, ? extends Window> assigner);
    WindowOperatorBuilder withWindowFunction(WindowFunction<?, ?, ?, ?> function);
    OneInputStreamOperator<RowData, RowData> build();
}

// Join type enumeration
enum FlinkJoinType {
    INNER, LEFT, RIGHT, FULL, SEMI, ANTI
}
```
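To make the enum concrete, a hypothetical helper (not part of the module) showing how a join type can drive outer-join null padding:

```java
import org.apache.flink.table.runtime.operators.join.FlinkJoinType;

// RIGHT and FULL outer joins must null-pad the left side when no match is found
static boolean padsLeftSide(FlinkJoinType joinType) {
    return joinType == FlinkJoinType.RIGHT || joinType == FlinkJoinType.FULL;
}
```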
Framework for generating optimized runtime code, including aggregation functions, join conditions, and projections, for maximum performance:

```java
// Base class for all generated classes
abstract class GeneratedClass<T> {
    String getClassName();
    String getCode();
    Object[] getReferences();
}

// Generated function interfaces
interface AggsHandleFunction {
    void accumulate(RowData input) throws Exception;
    RowData getValue() throws Exception;
}

interface JoinCondition {
    boolean apply(RowData left, RowData right) throws Exception;
}
```
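For orientation, a hand-written count-style accumulator matching the simplified `AggsHandleFunction` shape above; real implementations are code-generated and also manage accumulator state and merging:

```java
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;

// Minimal accumulator: counts rows and exposes the count as a one-field row
public class CountAggsHandle {
    private long count;

    public void accumulate(RowData input) {
        count++; // every input row increments the count
    }

    public RowData getValue() {
        return GenericRowData.of(count);
    }
}
```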
Comprehensive type conversion, serialization, and type information system supporting all Flink data types, with optimized serializers for runtime performance:

```java
// Key type converter class
class ClassDataTypeConverter {
    static Optional<DataType> extractDataType(Class<?> clazz);
    static Optional<Class<?>> extractClass(DataType dataType);
}

// Primary row data serializer
class RowDataSerializer extends TypeSerializer<RowData> {
    RowDataSerializer(LogicalType... types);
    RowData deserialize(DataInputView source) throws IOException;
    void serialize(RowData record, DataOutputView target) throws IOException;
}
```
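A round-trip sketch for `RowDataSerializer` using the in-memory `DataOutputSerializer`/`DataInputDeserializer` views from `flink-core`; exception handling is omitted:

```java
import org.apache.flink.core.memory.DataInputDeserializer;
import org.apache.flink.core.memory.DataOutputSerializer;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.runtime.typeutils.RowDataSerializer;
import org.apache.flink.table.types.logical.IntType;
import org.apache.flink.table.types.logical.VarCharType;

// Serializer for a two-field row: (INT, STRING)
RowDataSerializer serializer =
        new RowDataSerializer(new IntType(), new VarCharType(VarCharType.MAX_LENGTH));

RowData row = GenericRowData.of(42, StringData.fromString("hello"));

DataOutputSerializer out = new DataOutputSerializer(64);
serializer.serialize(row, out);

DataInputDeserializer in = new DataInputDeserializer(out.getCopyOfBuffer());
RowData copy = serializer.deserialize(in);
```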
Memory management utilities, specialized collections, and helper classes for efficient runtime operations, including memory pools, hash sets, and binary data processing:

```java
// Memory pool for efficient segment management
class LazyMemorySegmentPool {
    LazyMemorySegmentPool(int numberOfPages, int pageSize);
    MemorySegment nextSegment();
    void returnAll(List<MemorySegment> memory);
}

// Specialized hash collections
class IntHashSet {
    boolean add(int value);
    boolean contains(int value);
    int size();
}

// LRU cache implementation
class LRUMap<K, V> extends LinkedHashMap<K, V> {
    LRUMap(int maxCapacity);
}
```
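A brief usage sketch for `LRUMap`, assuming it lives at `org.apache.flink.table.runtime.util.LRUMap` with the single `maxCapacity` constructor shown above:

```java
import org.apache.flink.table.runtime.util.LRUMap;

// Holds at most two entries; the least recently used entry is evicted first
LRUMap<String, Integer> cache = new LRUMap<>(2);
cache.put("a", 1);
cache.put("b", 2);
cache.put("c", 3); // evicts "a"
System.out.println(cache.containsKey("a")); // false
```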
The module uses standard Java exception handling patterns:

- `IOException` for file system operations
- `RuntimeException` for runtime execution errors
- `SerializationException` for data serialization issues

Most operations that can fail are designed to propagate exceptions, allowing proper error handling at the application level.
Most classes carry Flink's `@Internal` annotation, indicating that the module provides internal runtime infrastructure rather than a stable public API.