Comprehensive metadata operations for catalogs, schemas, tables, columns, functions, and type information with full Spark SQL integration and HiveServer2 compatibility.
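
These server-side operations are the same metadata surface a JDBC client reaches through java.sql.DatabaseMetaData when it connects to the Spark Thrift Server over the HiveServer2 protocol. A minimal client-side sketch (the URL, port, and schema below are illustrative, and the Hive JDBC driver must be on the classpath):

import java.sql.*;

// Connect to the Spark Thrift Server using the HiveServer2 JDBC protocol
Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
DatabaseMetaData metadata = conn.getMetaData();

// DatabaseMetaData.getTables() is served by the GetTables operation documented below
try (ResultSet rs = metadata.getTables(null, "default", "%", new String[] {"TABLE", "VIEW"})) {
    while (rs.next()) {
        System.out.println(rs.getString("TABLE_SCHEM") + "." + rs.getString("TABLE_NAME"));
    }
}
conn.close();
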
Retrieve available catalogs in the Spark SQL environment.
/**
* Get available catalogs
* @param sessionHandle Session handle for the operation
* @return OperationHandle for fetching catalog results
* @throws HiveSQLException if operation fails
*/
OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException;

/**
* Spark implementation for getting catalogs
*/
class SparkGetCatalogsOperation extends GetCatalogsOperation {
/**
* Run the catalog listing operation
*/
def runInternal(): Unit
}

Usage Examples:
// Get available catalogs
OperationHandle catalogOp = cliService.getCatalogs(sessionHandle);
// Fetch results
TRowSet catalogResults = cliService.fetchResults(catalogOp);
TTableSchema catalogSchema = cliService.getResultSetMetadata(catalogOp);
// Process catalog information
// Columns: TABLE_CAT (catalog name)
for (TRow row : catalogResults.getRows()) {
String catalogName = row.getColVals().get(0).getStringVal().getValue();
System.out.println("Catalog: " + catalogName);
}

Retrieve database schemas with optional filtering by catalog and schema patterns.
/**
* Get database schemas
* @param sessionHandle Session handle for the operation
* @param catalogName Catalog name filter (null for all catalogs)
* @param schemaName Schema name pattern (null for all schemas, supports SQL wildcards)
* @return OperationHandle for fetching schema results
* @throws HiveSQLException if operation fails
*/
OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, String schemaName) throws HiveSQLException;

/**
* Spark implementation for getting schemas
*/
class SparkGetSchemasOperation extends GetSchemasOperation {
/**
* Create schema operation with filters
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
*/
def this(parentSession: HiveSession, catalogName: String, schemaName: String)
/**
* Run the schema listing operation
*/
def runInternal(): Unit
}

Usage Examples:
// Get all schemas
OperationHandle schemaOp = cliService.getSchemas(sessionHandle, null, null);
// Get schemas matching pattern
OperationHandle filteredSchemaOp = cliService.getSchemas(sessionHandle, "spark_catalog", "test_%");
// Fetch results
TRowSet schemaResults = cliService.fetchResults(schemaOp);
// Process schema information
// Columns: TABLE_SCHEM (schema name), TABLE_CATALOG (catalog name)
for (TRow row : schemaResults.getRows()) {
String schemaName = row.getColVals().get(0).getStringVal().getValue();
String catalogName = row.getColVals().get(1).getStringVal().getValue();
System.out.println("Schema: " + catalogName + "." + schemaName);
}

Retrieve table metadata with comprehensive filtering options.
/**
* Get tables with filtering options
* @param sessionHandle Session handle for the operation
* @param catalogName Catalog name filter (null for all catalogs)
* @param schemaName Schema name pattern (null for all schemas, supports SQL wildcards)
* @param tableName Table name pattern (null for all tables, supports SQL wildcards)
* @param tableTypes List of table types to include (null for all types)
* @return OperationHandle for fetching table results
* @throws HiveSQLException if operation fails
*/
OperationHandle getTables(SessionHandle sessionHandle, String catalogName, String schemaName, String tableName, List<String> tableTypes) throws HiveSQLException;

/**
* Spark implementation for getting tables
*/
class SparkGetTablesOperation extends GetTablesOperation {
/**
* Create table operation with comprehensive filters
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @param tableName Table name pattern
* @param tableTypes List of table types to include
*/
def this(parentSession: HiveSession, catalogName: String, schemaName: String, tableName: String, tableTypes: JList[String])
/**
* Run the table listing operation
*/
def runInternal(): Unit
}

Usage Examples:
import java.util.Arrays;
// Get all tables
OperationHandle tableOp = cliService.getTables(sessionHandle, null, null, null, null);
// Get only external tables in specific schema
List<String> tableTypes = Arrays.asList("EXTERNAL_TABLE");
OperationHandle extTableOp = cliService.getTables(sessionHandle, "spark_catalog", "default", null, tableTypes);
// Get tables matching pattern
OperationHandle patternTableOp = cliService.getTables(sessionHandle, null, "sales", "fact_%", null);
// Fetch results
TRowSet tableResults = cliService.fetchResults(tableOp);
// Process table information
// Columns: TABLE_CAT, TABLE_SCHEM, TABLE_NAME, TABLE_TYPE, REMARKS
for (TRow row : tableResults.getRows()) {
String catalog = row.getColVals().get(0).getStringVal().getValue();
String schema = row.getColVals().get(1).getStringVal().getValue();
String tableName = row.getColVals().get(2).getStringVal().getValue();
String tableType = row.getColVals().get(3).getStringVal().getValue();
String remarks = row.getColVals().get(4).getStringVal().getValue();
System.out.println(String.format("Table: %s.%s.%s (%s)", catalog, schema, tableName, tableType));
}

Get supported table types in the current Spark SQL environment.
/**
* Get supported table types
* @param sessionHandle Session handle for the operation
* @return OperationHandle for fetching table type results
* @throws HiveSQLException if operation fails
*/
OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException;

/**
* Spark implementation for getting table types
*/
class SparkGetTableTypesOperation extends GetTableTypesOperation {
/**
* Run the table types operation
*/
def runInternal(): Unit
}

Usage Examples:
// Get supported table types
OperationHandle tableTypeOp = cliService.getTableTypes(sessionHandle);
TRowSet tableTypeResults = cliService.fetchResults(tableTypeOp);
// Process table types
// Columns: TABLE_TYPE
for (TRow row : tableTypeResults.getRows()) {
String tableType = row.getColVals().get(0).getStringVal().getValue();
System.out.println("Supported table type: " + tableType);
}
// Output: TABLE, VIEW, EXTERNAL_TABLE, etc.

Retrieve detailed column information with filtering capabilities.
/**
* Get column information for tables
* @param sessionHandle Session handle for the operation
* @param catalogName Catalog name filter (null for all catalogs)
* @param schemaName Schema name pattern (null for all schemas, supports SQL wildcards)
* @param tableName Table name pattern (null for all tables, supports SQL wildcards)
* @param columnName Column name pattern (null for all columns, supports SQL wildcards)
* @return OperationHandle for fetching column results
* @throws HiveSQLException if operation fails
*/
OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, String schemaName, String tableName, String columnName) throws HiveSQLException;

/**
* Spark implementation for getting columns
*/
class SparkGetColumnsOperation extends GetColumnsOperation {
/**
* Create column operation with filters
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @param tableName Table name pattern
* @param columnName Column name pattern
*/
def this(parentSession: HiveSession, catalogName: String, schemaName: String, tableName: String, columnName: String)
/**
* Run the column listing operation
*/
def runInternal(): Unit
}

Usage Examples:
// Get all columns for a specific table
OperationHandle columnOp = cliService.getColumns(sessionHandle, "spark_catalog", "default", "employees", null);
// Get columns matching pattern
OperationHandle patternColumnOp = cliService.getColumns(sessionHandle, null, null, "fact_%", "date_%");
// Fetch results
TRowSet columnResults = cliService.fetchResults(columnOp);
// Process column information
// Columns: TABLE_CAT, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, DATA_TYPE, TYPE_NAME,
// COLUMN_SIZE, BUFFER_LENGTH, DECIMAL_DIGITS, NUM_PREC_RADIX, NULLABLE,
// REMARKS, COLUMN_DEF, SQL_DATA_TYPE, SQL_DATETIME_SUB, CHAR_OCTET_LENGTH,
// ORDINAL_POSITION, IS_NULLABLE, SCOPE_CATALOG, SCOPE_SCHEMA, SCOPE_TABLE,
// SOURCE_DATA_TYPE, IS_AUTO_INCREMENT
for (TRow row : columnResults.getRows()) {
String catalog = row.getColVals().get(0).getStringVal().getValue();
String schema = row.getColVals().get(1).getStringVal().getValue();
String tableName = row.getColVals().get(2).getStringVal().getValue();
String columnName = row.getColVals().get(3).getStringVal().getValue();
int dataType = row.getColVals().get(4).getI32Val().getValue();
String typeName = row.getColVals().get(5).getStringVal().getValue();
int columnSize = row.getColVals().get(6).getI32Val().getValue();
int ordinalPosition = row.getColVals().get(16).getI32Val().getValue();
System.out.println(String.format("Column: %s.%s.%s.%s - %s(%d) at position %d",
catalog, schema, tableName, columnName, typeName, columnSize, ordinalPosition));
}

Retrieve information about available functions with filtering support.
/**
* Get function information
* @param sessionHandle Session handle for the operation
* @param catalogName Catalog name filter (null for all catalogs)
* @param schemaName Schema name pattern (null for all schemas, supports SQL wildcards)
* @param functionName Function name pattern (null for all functions, supports SQL wildcards)
* @return OperationHandle for fetching function results
* @throws HiveSQLException if operation fails
*/
OperationHandle getFunctions(SessionHandle sessionHandle, String catalogName, String schemaName, String functionName) throws HiveSQLException;

/**
* Spark implementation for getting functions
*/
class SparkGetFunctionsOperation extends GetFunctionsOperation {
/**
* Create function operation with filters
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @param functionName Function name pattern
*/
def this(parentSession: HiveSession, catalogName: String, schemaName: String, functionName: String)
/**
* Run the function listing operation
*/
def runInternal(): Unit
}

Usage Examples:
// Get all functions
OperationHandle functionOp = cliService.getFunctions(sessionHandle, null, null, null);
// Get functions matching pattern
OperationHandle patternFunctionOp = cliService.getFunctions(sessionHandle, null, null, "date_%");
// Get built-in functions only
OperationHandle builtinFunctionOp = cliService.getFunctions(sessionHandle, "system", "builtin", null);
// Fetch results
TRowSet functionResults = cliService.fetchResults(functionOp);
// Process function information
// Columns: FUNCTION_CAT, FUNCTION_SCHEM, FUNCTION_NAME, REMARKS, FUNCTION_TYPE, SPECIFIC_NAME
for (TRow row : functionResults.getRows()) {
String catalog = row.getColVals().get(0).getStringVal().getValue();
String schema = row.getColVals().get(1).getStringVal().getValue();
String functionName = row.getColVals().get(2).getStringVal().getValue();
String remarks = row.getColVals().get(3).getStringVal().getValue();
int functionType = row.getColVals().get(4).getI32Val().getValue();
// FUNCTION_TYPE follows the JDBC DatabaseMetaData convention: 1 = scalar function (no table returned), 2 = table function
String typeDesc = switch (functionType) {
    case 1 -> "Scalar function";
    case 2 -> "Table function";
    default -> "Unknown";
};
System.out.println(String.format("Function: %s.%s.%s (%s) - %s",
catalog, schema, functionName, typeDesc, remarks));
}

Get comprehensive SQL type information supported by Spark SQL.
/**
* Get SQL type information
* @param sessionHandle Session handle for the operation
* @return OperationHandle for fetching type information results
* @throws HiveSQLException if operation fails
*/
OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException;

/**
* Spark implementation for getting type information
*/
class SparkGetTypeInfoOperation extends GetTypeInfoOperation {
/**
* Run the type information operation
*/
def runInternal(): Unit
}

Usage Examples:
// Get all supported SQL types
OperationHandle typeInfoOp = cliService.getTypeInfo(sessionHandle);
TRowSet typeInfoResults = cliService.fetchResults(typeInfoOp);
// Process type information
// Columns: TYPE_NAME, DATA_TYPE, PRECISION, LITERAL_PREFIX, LITERAL_SUFFIX,
// CREATE_PARAMS, NULLABLE, CASE_SENSITIVE, SEARCHABLE, UNSIGNED_ATTRIBUTE,
// FIXED_PREC_SCALE, AUTO_INCREMENT, LOCAL_TYPE_NAME, MINIMUM_SCALE, MAXIMUM_SCALE,
// SQL_DATA_TYPE, SQL_DATETIME_SUB, NUM_PREC_RADIX
for (TRow row : typeInfoResults.getRows()) {
String typeName = row.getColVals().get(0).getStringVal().getValue();
int dataType = row.getColVals().get(1).getI32Val().getValue();
int precision = row.getColVals().get(2).getI32Val().getValue();
String literalPrefix = row.getColVals().get(3).getStringVal().getValue();
String literalSuffix = row.getColVals().get(4).getStringVal().getValue();
boolean nullable = row.getColVals().get(6).getI16Val().getValue() != 0;
boolean caseSensitive = row.getColVals().get(7).getBoolVal().getValue();
System.out.println(String.format("Type: %s (JDBC type %d), precision=%d, nullable=%s, case_sensitive=%s",
typeName, dataType, precision, nullable, caseSensitive));
}

Retrieve primary key information for tables (HiveServer2 compatibility).
/**
* Get primary key information for a table
* @param sessionHandle Session handle for the operation
* @param catalog Catalog name
* @param schema Schema name
* @param table Table name
* @return OperationHandle for fetching primary key results
* @throws HiveSQLException if operation fails
*/
OperationHandle getPrimaryKeys(SessionHandle sessionHandle, String catalog, String schema, String table) throws HiveSQLException;
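
Usage Example (a minimal sketch; the result-set layout is assumed to follow the standard JDBC getPrimaryKeys columns TABLE_CAT, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, KEY_SEQ, PK_NAME, and the identifiers are illustrative):

// Get primary key columns for a specific table (exact identifiers, not patterns)
OperationHandle pkOp = cliService.getPrimaryKeys(sessionHandle, "spark_catalog", "default", "employees");
TRowSet pkResults = cliService.fetchResults(pkOp);
for (TRow row : pkResults.getRows()) {
    String columnName = row.getColVals().get(3).getStringVal().getValue();   // COLUMN_NAME
    int keySeq = row.getColVals().get(4).getI32Val().getValue();             // KEY_SEQ (assumed 32-bit in the Thrift row)
    System.out.println("Primary key column: " + columnName + " (position " + keySeq + ")");
}

Retrieve foreign key relationships between tables.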
/**
* Get cross reference information (foreign key relationships)
* @param sessionHandle Session handle for the operation
* @param primaryCatalog Primary table catalog
* @param primarySchema Primary table schema
* @param primaryTable Primary table name
* @param foreignCatalog Foreign table catalog
* @param foreignSchema Foreign table schema
* @param foreignTable Foreign table name
* @return OperationHandle for fetching cross reference results
* @throws HiveSQLException if operation fails
*/
OperationHandle getCrossReference(SessionHandle sessionHandle,
String primaryCatalog, String primarySchema, String primaryTable,
String foreignCatalog, String foreignSchema, String foreignTable) throws HiveSQLException;
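
Usage Example (a minimal sketch; the result-set layout is assumed to follow the standard JDBC getCrossReference columns, and the table names are illustrative):

// Find foreign keys in "orders" that reference "customers"
OperationHandle fkOp = cliService.getCrossReference(sessionHandle,
    "spark_catalog", "default", "customers",   // primary (referenced) table
    "spark_catalog", "default", "orders");     // foreign (referencing) table
TRowSet fkResults = cliService.fetchResults(fkOp);
// Assumed columns: PKTABLE_CAT, PKTABLE_SCHEM, PKTABLE_NAME, PKCOLUMN_NAME,
//                  FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME, FKCOLUMN_NAME,
//                  KEY_SEQ, UPDATE_RULE, DELETE_RULE, FK_NAME, PK_NAME, DEFERRABILITY
for (TRow row : fkResults.getRows()) {
    String pkColumn = row.getColVals().get(3).getStringVal().getValue();
    String fkTable = row.getColVals().get(6).getStringVal().getValue();
    String fkColumn = row.getColVals().get(7).getStringVal().getValue();
    System.out.println(String.format("Foreign key: %s.%s references %s", fkTable, fkColumn, pkColumn));
}

Central management of all metadata operations through the operation manager.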
/**
* Operation manager for creating and managing metadata operations
*/
class SparkSQLOperationManager extends OperationManager {
/**
* Create a new catalog listing operation
* @param parentSession Parent session for the operation
* @return GetCatalogsOperation instance
*/
def newGetCatalogsOperation(parentSession: HiveSession): GetCatalogsOperation
/**
* Create a new schema listing operation
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @return GetSchemasOperation instance
*/
def newGetSchemasOperation(parentSession: HiveSession, catalogName: String, schemaName: String): GetSchemasOperation
/**
* Create a new table listing operation
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @param tableName Table name pattern
* @param tableTypes List of table types to include
* @return MetadataOperation instance
*/
def newGetTablesOperation(parentSession: HiveSession, catalogName: String, schemaName: String, tableName: String, tableTypes: JList[String]): MetadataOperation
/**
* Create a new column listing operation
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @param tableName Table name pattern
* @param columnName Column name pattern
* @return GetColumnsOperation instance
*/
def newGetColumnsOperation(parentSession: HiveSession, catalogName: String, schemaName: String, tableName: String, columnName: String): GetColumnsOperation
/**
* Create a new function listing operation
* @param parentSession Parent session for the operation
* @param catalogName Catalog name filter
* @param schemaName Schema name pattern
* @param functionName Function name pattern
* @return GetFunctionsOperation instance
*/
def newGetFunctionsOperation(parentSession: HiveSession, catalogName: String, schemaName: String, functionName: String): GetFunctionsOperation
/**
* Create a new type information operation
* @param parentSession Parent session for the operation
* @return GetTypeInfoOperation instance
*/
def newGetTypeInfoOperation(parentSession: HiveSession): GetTypeInfoOperation
}
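
Usage Example (a minimal sketch of how the session layer drives these factory methods; the operationManager and hiveSession variables are illustrative, not part of the documented API):

import java.util.Arrays;
// Create and run a table-listing operation directly through the manager
MetadataOperation tablesOp = operationManager.newGetTablesOperation(
    hiveSession,
    "spark_catalog",                  // catalog filter
    "default",                        // schema pattern
    "%",                              // table name pattern (all tables)
    Arrays.asList("TABLE", "VIEW"));  // table types to include
tablesOp.run();                       // drives runInternal() with state transitions
OperationHandle tablesHandle = tablesOp.getHandle();
// The handle can then be passed to fetchResults/getResultSetMetadata as in the examples above.

Comprehensive error handling for metadata operations with specific error codes.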
// Common SQL states for metadata operation errors
public static final String INVALID_CATALOG_NAME = "HY000";  // SQLSTATE: general error
public static final String INVALID_SCHEMA_NAME = "3F000";   // SQLSTATE: invalid schema name
public static final String TABLE_NOT_FOUND = "42S02";       // SQLSTATE: base table or view not found
public static final String COLUMN_NOT_FOUND = "42S22";      // SQLSTATE: column not found
public static final String FUNCTION_NOT_FOUND = "42000";    // SQLSTATE: syntax error or access rule violation

Error Handling Examples:
try {
OperationHandle tableOp = cliService.getTables(sessionHandle, "invalid_catalog", null, null, null);
TRowSet results = cliService.fetchResults(tableOp);
} catch (HiveSQLException e) {
switch (e.getSQLState()) {
case "HY000":
System.err.println("Invalid catalog name: " + e.getMessage());
break;
case "3F000":
System.err.println("Invalid schema name: " + e.getMessage());
break;
default:
System.err.println("Metadata operation failed: " + e.getMessage());
}
}
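
Operation handles hold server-side state until they are released. A minimal lifecycle sketch, assuming the CLI service exposes HiveServer2-style getOperationStatus and closeOperation calls (exact signatures vary across versions):

OperationHandle op = cliService.getTables(sessionHandle, null, null, null, null);
try {
    // Check the operation state before fetching (assumed HiveServer2-style API;
    // some versions take an additional progress-update flag)
    OperationStatus status = cliService.getOperationStatus(op);
    if (status.getState() == OperationState.FINISHED) {
        TRowSet results = cliService.fetchResults(op);
        // ... process results as in the examples above ...
    }
} catch (HiveSQLException e) {
    System.err.println("Metadata operation failed: " + e.getMessage());
} finally {
    try {
        // Release server-side resources associated with the handle
        cliService.closeOperation(op);
    } catch (HiveSQLException e) {
        System.err.println("Failed to close operation: " + e.getMessage());
    }
}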