XGBoost is an optimized distributed gradient boosting library designed to be highly efficient, flexible, and portable.
XGBoost provides global configuration management for controlling library behavior, including verbosity, memory management, GPU coordination, and parameter validation. These settings affect all XGBoost operations within the current Python session.
Set and get global configuration parameters that affect XGBoost behavior across all operations.
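For orientation before the full signatures, here is a minimal sketch of the three entry points, using only the names documented below:

import xgboost as xgb

# Read the current global configuration as a plain dict
print(xgb.get_config())

# Change a setting for the rest of the session
xgb.set_config(verbosity=1)

# Change a setting only inside a block; the previous value is restored on exit
with xgb.config_context(verbosity=0):
    pass  # operations here run silently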
def set_config(
verbosity=None,
use_rmm=None,
rmm_pool_size=None,
parameter_validation=None,
gpu_coordinate_descent=None
):
"""
Set global configuration parameters.
Parameters:
- verbosity: Verbosity level (0=silent, 1=warning, 2=info, 3=debug)
- use_rmm: Whether to use RAPIDS Memory Manager for GPU memory allocation
- rmm_pool_size: RMM memory pool size in bytes
- parameter_validation: Whether to validate parameters (True/False)
- gpu_coordinate_descent: Whether to use GPU for coordinate descent
Returns:
None
Note:
Configuration changes affect all subsequent XGBoost operations.
"""
def get_config():
"""
Get current global configuration.
Returns:
dict: Dictionary containing current configuration parameters
"""
def config_context(**config):
"""
Context manager for temporary configuration changes.
Parameters:
**config: Configuration parameters to set temporarily
Returns:
Context manager that restores original configuration on exit
Example:
with xgb.config_context(verbosity=0):
# XGBoost operations run silently
model = xgb.train(params, dtrain)
# Original verbosity restored
"""Access build and system information about the XGBoost installation.
def build_info():
"""
Get build information about XGBoost installation.
Returns:
dict: Dictionary containing build information including:
- USE_OPENMP: OpenMP support status
- USE_CUDA: CUDA support status
- USE_NCCL: NCCL support status
- USE_RMM: RMM support status
- BUILD_WITH_SHARED_LIB: Shared library build status
- GCC_VERSION: GCC version used for compilation
- CUDA_VERSION: CUDA version if available
And other build-time configuration details
"""import xgboost as xgb
# Set verbosity levels
xgb.set_config(verbosity=0) # Silent
xgb.set_config(verbosity=1) # Warnings only
xgb.set_config(verbosity=2) # Info messages
xgb.set_config(verbosity=3) # Debug output

# Enable RMM for GPU memory management
xgb.set_config(use_rmm=True, rmm_pool_size=1024**3) # 1GB pool
# Disable RMM
xgb.set_config(use_rmm=False)

# Enable parameter validation (default)
xgb.set_config(parameter_validation=True)
# Disable parameter validation for performance
xgb.set_config(parameter_validation=False)

import xgboost as xgb
# Check current configuration
config = xgb.get_config()
print("Current config:", config)
# Set global verbosity
xgb.set_config(verbosity=1)
# Train model with current settings
dtrain = xgb.DMatrix(X_train, label=y_train)
model = xgb.train(params, dtrain, num_boost_round=100)

import xgboost as xgb
# Normal verbosity
print("Current verbosity:", xgb.get_config()['verbosity'])
# Temporarily change configuration
with xgb.config_context(verbosity=0):
# Silent training
model = xgb.train(params, dtrain, num_boost_round=100)
print("Training completed silently")
# Original verbosity restored
print("Verbosity restored:", xgb.get_config()['verbosity'])import xgboost as xgb
# Check build info for GPU support
build_info = xgb.build_info()
print("CUDA support:", build_info.get('USE_CUDA', False))
print("RMM support:", build_info.get('USE_RMM', False))
if build_info.get('USE_CUDA'):
# Configure GPU memory management
xgb.set_config(
use_rmm=True,
rmm_pool_size=2 * 1024**3, # 2GB memory pool
gpu_coordinate_descent=True
)
# Train on GPU
params = {
'objective': 'reg:squarederror',
    'tree_method': 'hist',  # with device='cuda', hist runs on the GPU (gpu_hist is deprecated)
'device': 'cuda'
}
model = xgb.train(params, dtrain)

import xgboost as xgb
# Optimize for production (disable validation, reduce verbosity)
xgb.set_config(
verbosity=0,
parameter_validation=False
)
# Training runs faster but with fewer safety checks
model = xgb.train(params, dtrain, num_boost_round=100)
# Re-enable for development
xgb.set_config(
verbosity=1,
parameter_validation=True
)
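
Because get_config returns a plain dict, a snapshot of the configuration can also be captured and restored by hand; a minimal sketch, assuming set_config accepts the same keys that get_config reports (as the signatures above suggest):

snapshot = xgb.get_config()  # capture the current settings
xgb.set_config(verbosity=0, parameter_validation=False)
# ... fast, quiet experimentation ...
xgb.set_config(**snapshot)  # restore the captured settings

import xgboost as xgb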
import os
# Environment-specific configuration
if os.getenv('XGB_DEBUG'):
xgb.set_config(verbosity=3) # Debug mode
elif os.getenv('XGB_QUIET'):
xgb.set_config(verbosity=0) # Silent mode
else:
xgb.set_config(verbosity=1) # Default warnings
# Check environment capabilities
build_info = xgb.build_info()
if build_info.get('USE_OPENMP'):
print("OpenMP parallelization available")
if build_info.get('USE_CUDA'):
print("GPU acceleration available")import xgboost as xgb
# Different configurations for different tasks
def train_model_quietly(params, dtrain):
with xgb.config_context(verbosity=0):
return xgb.train(params, dtrain, num_boost_round=100)
def debug_model_training(params, dtrain):
with xgb.config_context(verbosity=3, parameter_validation=True):
return xgb.train(params, dtrain, num_boost_round=10)
# Use appropriate configuration for each task
production_model = train_model_quietly(params, dtrain)
debug_model = debug_model_training(params, dtrain)

# Development configuration
xgb.set_config(
verbosity=2, # Show info messages
parameter_validation=True # Validate parameters
)
# Production configuration
xgb.set_config(
verbosity=1, # Warnings only
parameter_validation=False # Skip validation for speed
)

# For GPU environments with limited memory
if xgb.build_info().get('USE_CUDA'):
xgb.set_config(
use_rmm=True,
rmm_pool_size=512 * 1024**2 # 512MB pool
)
# For distributed training
xgb.set_config(verbosity=1) # Reduce log noise across workers
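
The global configuration is per Python process, so in distributed setups each worker has to apply it in its own session; a minimal sketch (worker_main and its arguments are hypothetical names, not part of the XGBoost API):

import xgboost as xgb

def worker_main(params, dtrain):
    # Each worker process sets its own global configuration
    xgb.set_config(verbosity=1)
    return xgb.train(params, dtrain, num_boost_round=100)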