tessl/pypi-xgboost-cpu

XGBoost Python Package (CPU only) - A minimal installation with no support for GPU algorithms or federated learning, providing optimized distributed gradient boosting for machine learning

Distributed Computing

Native support for distributed training across Dask and Spark ecosystems, enabling scalable machine learning on large datasets and compute clusters. XGBoost provides seamless integration with popular distributed computing frameworks while maintaining high performance and fault tolerance.

Capabilities

Dask Integration

XGBoost's Dask integration enables distributed training and prediction using Dask's flexible task scheduling and data structures. It supports both Dask DataFrame and Dask Array inputs, with automatic data partitioning and worker coordination.
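
A minimal end-to-end sketch of the workflow these APIs support (local test cluster, synthetic data; illustrative only):

from dask.distributed import Client
import dask.array as da
import xgboost.dask as dxgb

client = Client(processes=True, n_workers=2)           # small local cluster

X = da.random.random((10000, 10), chunks=(1000, 10))   # partitioned features
y = da.random.randint(0, 2, 10000, chunks=(1000,))

dtrain = dxgb.DaskDMatrix(client, X, y)
output = dxgb.train(client, {'objective': 'binary:logistic', 'tree_method': 'hist'},
                    dtrain, num_boost_round=10)
preds = dxgb.predict(client, output['booster'], X)     # lazy dask.Array
print(preds.compute()[:5])

client.close()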

import xgboost.dask as dxgb

def dxgb.train(client, params, dtrain, num_boost_round=10, evals=(), 
               obj=None, maximize=None, early_stopping_rounds=None, 
               evals_result=None, verbose_eval=True, xgb_model=None, 
               callbacks=None):
    """
    Train XGBoost model using Dask distributed computing.
    
    Parameters:
    - client: Dask client for distributed computation (dask.distributed.Client)
    - params: Training parameters (same as xgb.train) (dict)
    - dtrain: Training data (DaskDMatrix)
    - num_boost_round: Number of boosting rounds (int)
    - evals: Evaluation sets as list of (DaskDMatrix, name) tuples (list)
    - obj: Custom objective function (callable, optional)
    - maximize: Whether to maximize metric (bool, optional)
    - early_stopping_rounds: Early stopping rounds (int, optional)
    - evals_result: Dict to store evaluation results (dict, optional)
    - verbose_eval: Verbosity for evaluation (bool or int)
    - xgb_model: Existing model to continue training (Booster, optional)
    - callbacks: Training callbacks (list, optional)
    
    Returns: dict - Contains 'booster' (trained model) and 'history' (training log)
    """

def dxgb.predict(client, model, data, output_margin=False, missing=float('nan'), 
                 pred_leaf=False, pred_contribs=False, approx_contribs=False, 
                 pred_interactions=False, validate_features=True, 
                 iteration_range=(0, 0), strict_shape=False):
    """
    Run distributed prediction using Dask.
    
    Parameters:
    - client: Dask client (dask.distributed.Client)
    - model: Trained model (Booster, or the dict returned by dxgb.train)
    - data: Input data (DaskDMatrix, dask.DataFrame, or dask.Array)
    - output_margin: Whether to output margin values (bool)
    - missing: Value to treat as missing (float)
    - pred_leaf: Whether to predict leaf indices (bool)
    - pred_contribs: Whether to predict contributions (bool)
    - approx_contribs: Whether to use approximate contributions (bool)
    - pred_interactions: Whether to predict interactions (bool)
    - validate_features: Whether to validate features (bool)
    - iteration_range: Range of trees to use (tuple)
    - strict_shape: Whether to enforce strict shape (bool)
    
    Returns: dask.Array or dask.DataFrame - Distributed predictions
    """

def dxgb.inplace_predict(client, model, data, iteration_range=(0, 0), 
                        predict_type='value', missing=float('nan'), 
                        validate_features=True, base_margin=None, 
                        strict_shape=False):
    """
    Inplace distributed prediction using Dask.
    
    Parameters:
    - client: Dask client (dask.distributed.Client)
    - model: Trained XGBoost model (Booster)
    - data: Input data (dask.DataFrame or dask.Array)
    - iteration_range: Range of trees to use (tuple)
    - predict_type: Type of prediction ('value' or 'margin') (str)
    - missing: Value to treat as missing (float)
    - validate_features: Whether to validate features (bool)
    - base_margin: Base prediction margins (array-like, optional)
    - strict_shape: Whether to enforce strict shape (bool)
    
    Returns: dask.Array - Distributed predictions
    """

class dxgb.DaskDMatrix:
    def __init__(self, client, data, label=None, *, weight=None, 
                 base_margin=None, missing=None, silent=False, 
                 feature_names=None, feature_types=None, group=None, 
                 qid=None, label_lower_bound=None, label_upper_bound=None, 
                 feature_weights=None, enable_categorical=False):
        """
        DMatrix holding references to Dask DataFrame or Array.
        
        Parameters:
        - client: Dask distributed client (dask.distributed.Client)
        - data: Input data (dask.DataFrame, dask.Array, or list of such objects)
        - label: Target values (dask.DataFrame, dask.Array, or list)
        - weight: Instance weights (dask.DataFrame, dask.Array, or list)
        - base_margin: Base prediction margins (dask.DataFrame, dask.Array, or list)
        - missing: Value to treat as missing (float)
        - silent: Whether to suppress loading messages (bool)
        - feature_names: Feature names (list of str)
        - feature_types: Feature types (list of str)
        - group: Group sizes for ranking (dask.Array or list)
        - qid: Query IDs for ranking (dask.Array or list)
        - label_lower_bound: Lower bounds for ranking (dask.Array or list)
        - label_upper_bound: Upper bounds for ranking (dask.Array or list)
        - feature_weights: Feature weights (dask.Array or list)
        - enable_categorical: Enable categorical features (bool)
        """

class dxgb.DaskQuantileDMatrix:
    def __init__(self, client, data, label=None, *, weight=None, 
                 base_margin=None, missing=None, silent=False, 
                 feature_names=None, feature_types=None, group=None, 
                 qid=None, label_lower_bound=None, label_upper_bound=None, 
                 feature_weights=None, ref=None, enable_categorical=False, 
                 max_bin=256):
        """
        Distributed QuantileDMatrix for memory-efficient training.
        
        Parameters: Same as DaskDMatrix with additional:
        - ref: Reference QuantileDMatrix for validation (DaskQuantileDMatrix)
        - max_bin: Maximum number of bins for quantization (int)
        """

class dxgb.DaskXGBRegressor:
    def __init__(self, *, max_depth=6, learning_rate=0.3, n_estimators=100, 
                 verbosity=1, objective=None, booster='gbtree', 
                 tree_method='auto', n_jobs=None, gamma=0, min_child_weight=1, 
                 max_delta_step=0, subsample=1, colsample_bytree=1, 
                 colsample_bylevel=1, colsample_bynode=1, reg_alpha=0, 
                 reg_lambda=1, scale_pos_weight=1, base_score=None, 
                 random_state=None, missing=float('nan'), num_parallel_tree=1, 
                 monotone_constraints=None, interaction_constraints=None, 
                 importance_type='gain', **kwargs):
        """Dask-distributed XGBoost regressor with scikit-learn API."""
    
    def fit(self, X, y, *, sample_weight=None, base_margin=None, 
            eval_set=None, verbose=True, xgb_model=None, 
            sample_weight_eval_set=None, base_margin_eval_set=None, 
            feature_weights=None):
        """Fit distributed regressor. Requires active Dask client."""
    
    def predict(self, X, *, output_margin=False, validate_features=True, 
                base_margin=None, iteration_range=None):
        """Distributed prediction returning dask.Array."""

class dxgb.DaskXGBClassifier:
    def __init__(self, *, max_depth=6, learning_rate=0.3, n_estimators=100, 
                 verbosity=1, objective=None, booster='gbtree', 
                 tree_method='auto', n_jobs=None, gamma=0, min_child_weight=1, 
                 max_delta_step=0, subsample=1, colsample_bytree=1, 
                 colsample_bylevel=1, colsample_bynode=1, reg_alpha=0, 
                 reg_lambda=1, scale_pos_weight=1, base_score=None, 
                 random_state=None, missing=float('nan'), num_parallel_tree=1, 
                 monotone_constraints=None, interaction_constraints=None, 
                 importance_type='gain', **kwargs):
        """Dask-distributed XGBoost classifier with scikit-learn API."""
    
    def predict_proba(self, X, *, validate_features=True, base_margin=None, 
                      iteration_range=None):
        """Predict class probabilities using distributed computation."""

class dxgb.DaskXGBRanker:
    def __init__(self, *, max_depth=6, learning_rate=0.3, n_estimators=100, 
                 verbosity=1, objective='rank:ndcg', booster='gbtree', 
                 tree_method='auto', n_jobs=None, gamma=0, min_child_weight=1, 
                 max_delta_step=0, subsample=1, colsample_bytree=1, 
                 colsample_bylevel=1, colsample_bynode=1, reg_alpha=0, 
                 reg_lambda=1, scale_pos_weight=1, base_score=None, 
                 random_state=None, missing=float('nan'), num_parallel_tree=1, 
                 monotone_constraints=None, interaction_constraints=None, 
                 importance_type='gain', **kwargs):
        """Dask-distributed XGBoost ranker for learning-to-rank tasks."""

class dxgb.DaskXGBRFRegressor:
    """Dask-distributed XGBoost random forest regressor."""

class dxgb.DaskXGBRFClassifier:
    """Dask-distributed XGBoost random forest classifier."""

class dxgb.CommunicatorContext:
    def __init__(self, **args):
        """
        Dask-specific communicator context manager.
        
        Parameters:
        - **args: Arguments for communicator setup
        """

Spark Integration

XGBoost integrates with Apache Spark through the PySpark-based xgboost.spark module, enabling large-scale distributed machine learning in Spark environments. It supports Spark DataFrames and MLlib pipeline integration.

from xgboost import spark as spark_xgb

class spark_xgb.SparkXGBRegressor:
    def __init__(self, *, features_col='features', label_col='label', 
                 prediction_col='prediction', max_depth=6, learning_rate=0.3, 
                 n_estimators=100, verbosity=1, objective=None, 
                 booster='gbtree', tree_method='auto', gamma=0, 
                 min_child_weight=1, max_delta_step=0, subsample=1, 
                 colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1, 
                 reg_alpha=0, reg_lambda=1, scale_pos_weight=1, 
                 base_score=None, random_state=None, missing=float('nan'), 
                 num_parallel_tree=1, **kwargs):
        """
        PySpark XGBoost regressor integrated with MLlib pipelines.
        
        Parameters:
        - features_col: Features column name (str)
        - label_col: Label column name (str)
        - prediction_col: Prediction column name (str)
        - Other parameters: Same as XGBRegressor
        """
    
    def fit(self, dataset):
        """
        Fit the regressor on Spark DataFrame.
        
        Parameters:
        - dataset: Training data (pyspark.sql.DataFrame)
        
        Returns: SparkXGBRegressorModel
        """

class spark_xgb.SparkXGBRegressorModel:
    def transform(self, dataset):
        """
        Transform Spark DataFrame with predictions.
        
        Parameters:
        - dataset: Input data (pyspark.sql.DataFrame)
        
        Returns: pyspark.sql.DataFrame - DataFrame with predictions
        """

class spark_xgb.SparkXGBClassifier:
    def __init__(self, *, features_col='features', label_col='label', 
                 prediction_col='prediction', probability_col='probability', 
                 raw_prediction_col='rawPrediction', max_depth=6, 
                 learning_rate=0.3, n_estimators=100, verbosity=1, 
                 objective=None, booster='gbtree', tree_method='auto', 
                 gamma=0, min_child_weight=1, max_delta_step=0, subsample=1, 
                 colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1, 
                 reg_alpha=0, reg_lambda=1, scale_pos_weight=1, 
                 base_score=None, random_state=None, missing=float('nan'), 
                 num_parallel_tree=1, **kwargs):
        """
        PySpark XGBoost classifier integrated with MLlib pipelines.
        
        Parameters:
        - probability_col: Probability column name (str)
        - raw_prediction_col: Raw prediction column name (str)
        - Other parameters: Same as SparkXGBRegressor
        """

class spark_xgb.SparkXGBClassifierModel:
    def transform(self, dataset):
        """Transform with class predictions and probabilities."""

class spark_xgb.SparkXGBRanker:
    def __init__(self, *, features_col='features', label_col='label', 
                 prediction_col='prediction', qid_col=None, 
                 max_depth=6, learning_rate=0.3, n_estimators=100, 
                 verbosity=1, objective='rank:ndcg', booster='gbtree', 
                 tree_method='auto', gamma=0, min_child_weight=1, 
                 max_delta_step=0, subsample=1, colsample_bytree=1, 
                 colsample_bylevel=1, colsample_bynode=1, reg_alpha=0, 
                 reg_lambda=1, scale_pos_weight=1, base_score=None, 
                 random_state=None, missing=float('nan'), 
                 num_parallel_tree=1, **kwargs):
        """
        PySpark XGBoost ranker for learning-to-rank tasks.
        
        Parameters:
        - qid_col: Query ID column name for ranking (str)
        - Other parameters: Same as SparkXGBRegressor
        """

class spark_xgb.SparkXGBRankerModel:
    """Trained Spark XGBoost ranker model."""

Usage Examples

Dask Distributed Training

import dask.array as da
from dask.distributed import Client
import xgboost.dask as dxgb
from sklearn.datasets import make_classification

# Start Dask client
client = Client('localhost:8786')  # Connect to Dask scheduler
# Or start local cluster: client = Client(processes=True, n_workers=4, threads_per_worker=2)

# Create large dataset
X, y = make_classification(n_samples=100000, n_features=100, n_classes=2, 
                          n_informative=50, random_state=42)

# Convert to Dask arrays for distributed processing
X_da = da.from_array(X, chunks=(10000, 100))
y_da = da.from_array(y, chunks=(10000,))

# Create DaskDMatrix
dtrain = dxgb.DaskDMatrix(client, X_da, y_da)

# Training parameters
params = {
    'objective': 'binary:logistic',
    'eval_metric': 'logloss',
    'max_depth': 6,
    'learning_rate': 0.1,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
    'tree_method': 'hist',  # Recommended for distributed training
    'random_state': 42
}

# Distributed training
output = dxgb.train(
    client=client,
    params=params,
    dtrain=dtrain,
    num_boost_round=100,
    evals=[(dtrain, 'train')],
    early_stopping_rounds=10,
    verbose_eval=10
)

# Extract trained model
model = output['booster']
history = output['history']

print(f"Training completed with {len(history)} iterations")

# Distributed prediction
predictions = dxgb.predict(client, model, X_da)
print(f"Predictions shape: {predictions.shape}")

# Compute predictions (trigger computation)
pred_values = predictions.compute()
print(f"Computed predictions shape: {pred_values.shape}")

# Close client
client.close()

Dask with DataFrames

import pandas as pd
import dask.dataframe as dd
from dask.distributed import Client
import xgboost.dask as dxgb

# Start Dask client
client = Client(processes=True, n_workers=2, threads_per_worker=2)

# Create or load a large dataset as a Dask DataFrame
# In practice, you'd load from files: dd.read_csv('large_dataset.csv')
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=100000, n_features=20, n_classes=2,
                           n_informative=15, random_state=42)
df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(X.shape[1])])
df['target'] = y

# Convert to Dask DataFrame
ddf = dd.from_pandas(df, npartitions=8)

# Separate features and target
feature_cols = [f'feature_{i}' for i in range(X.shape[1])]
X_dask = ddf[feature_cols]
y_dask = ddf['target']

# Create DaskDMatrix from DataFrame
dtrain = dxgb.DaskDMatrix(client, X_dask, y_dask)

# Training parameters optimized for distributed training
params = {
    'objective': 'binary:logistic',
    'eval_metric': ['logloss', 'auc'],
    'max_depth': 8,
    'learning_rate': 0.1,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
    'tree_method': 'hist',
    'max_bin': 256,
    'random_state': 42
}

# Distributed training with evaluation
output = dxgb.train(
    client=client,
    params=params,
    dtrain=dtrain,
    num_boost_round=200,
    evals=[(dtrain, 'train')],
    early_stopping_rounds=20,
    verbose_eval=25
)

model = output['booster']

# Distributed prediction on new data
test_ddf = ddf.sample(frac=0.2)  # Sample for testing
test_X = test_ddf[feature_cols]

predictions = dxgb.predict(client, model, test_X)
pred_df = test_ddf.assign(predictions=predictions)

# Compute and display results
result = pred_df.compute()
print(f"Test predictions sample:\n{result[['target', 'predictions']].head()}")

client.close()

Dask Scikit-learn Interface

from dask.distributed import Client
import xgboost.dask as dxgb
import dask.array as da

# Start client
client = Client(processes=True, n_workers=4)

# Create distributed data
X_da = da.random.random((50000, 50), chunks=(5000, 50))
y_da = da.random.randint(0, 2, 50000, chunks=(5000,))

# Split data
train_size = int(0.8 * len(X_da))
X_train, X_test = X_da[:train_size], X_da[train_size:]
y_train, y_test = y_da[:train_size], y_da[train_size:]

# Distributed classifier
dask_clf = dxgb.DaskXGBClassifier(
    objective='binary:logistic',
    max_depth=6,
    learning_rate=0.1,
    n_estimators=100,
    tree_method='hist',
    early_stopping_rounds=10,  # set on the estimator; the fit-time argument is deprecated
    random_state=42
)

# Fit with distributed data
dask_clf.fit(X_train, y_train,
             eval_set=[(X_test, y_test)],
             verbose=False)

# Distributed prediction
y_pred = dask_clf.predict(X_test)
y_pred_proba = dask_clf.predict_proba(X_test)

print(f"Predictions computed: {y_pred.compute().shape}")
print(f"Probabilities computed: {y_pred_proba.compute().shape}")

# Feature importance (computed on the worker that has the model)
importance = dask_clf.feature_importances_
print(f"Feature importance shape: {importance.shape}")

client.close()
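
The trained booster can be pulled out of the Dask wrapper and saved for ordinary single-machine inference; a short sketch (the file name is illustrative):

import xgboost as xgb

booster = dask_clf.get_booster()          # local copy of the trained model
booster.save_model('dask_trained_model.json')

# Later, on a single machine, no Dask required:
bst = xgb.Booster()
bst.load_model('dask_trained_model.json')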

Spark Distributed Training

from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from xgboost.spark import SparkXGBClassifier
from sklearn.datasets import make_classification
import pandas as pd

# Initialize Spark session
spark = SparkSession.builder \
    .appName("XGBoost Distributed Training") \
    .config("spark.sql.adaptive.enabled", "true") \
    .config("spark.sql.adaptive.coalescePartitions.enabled", "true") \
    .getOrCreate()

# Create sample data
X, y = make_classification(n_samples=100000, n_features=20, n_classes=2, 
                          n_informative=15, random_state=42)

# Create pandas DataFrame
df = pd.DataFrame(X, columns=[f'feature_{i}' for i in range(20)])
df['label'] = y

# Convert to Spark DataFrame
spark_df = spark.createDataFrame(df)

# Prepare features using VectorAssembler
feature_cols = [f'feature_{i}' for i in range(20)]
assembler = VectorAssembler(inputCols=feature_cols, outputCol='features')

# Create XGBoost classifier for Spark
xgb_classifier = SparkXGBClassifier(
    features_col='features',
    label_col='label',
    prediction_col='prediction',
    probability_col='probability',
    max_depth=6,
    learning_rate=0.1,
    n_estimators=100,
    subsample=0.8,
    colsample_bytree=0.8,
    tree_method='hist',
    objective='binary:logistic',
    eval_metric='logloss',
    random_state=42
)

# Create ML pipeline
pipeline = Pipeline(stages=[assembler, xgb_classifier])

# Split data
train_df, test_df = spark_df.randomSplit([0.8, 0.2], seed=42)

# Train model
model = pipeline.fit(train_df)

# Make predictions
predictions = model.transform(test_df)

# Show results
predictions.select('label', 'prediction', 'probability').show(10, truncate=False)

# Evaluate model
evaluator = BinaryClassificationEvaluator(labelCol='label', rawPredictionCol='rawPrediction')
auc = evaluator.evaluate(predictions)
print(f"AUC: {auc:.4f}")

# Stop Spark
spark.stop()

Distributed Cross-Validation with Dask

from dask.distributed import Client
import xgboost.dask as dxgb
import dask.array as da
import numpy as np

# Setup
client = Client(processes=True, n_workers=4)

# Create data
X_da = da.random.random((20000, 30), chunks=(2000, 30))
y_da = da.random.randint(0, 2, 20000, chunks=(2000,))

# Parameters for cross-validation
params = {
    'objective': 'binary:logistic',
    'eval_metric': 'auc',
    'max_depth': 6,
    'learning_rate': 0.1,
    'tree_method': 'hist',
    'random_state': 42
}

# Manual cross-validation with Dask
def distributed_cv(client, X, y, params, n_splits=5, n_rounds=100):
    """Perform distributed cross-validation."""
    n_samples = len(X)
    fold_size = n_samples // n_splits
    scores = []
    
    for fold in range(n_splits):
        print(f"Training fold {fold + 1}/{n_splits}")
        
        # Create train/validation splits
        val_start = fold * fold_size
        val_end = (fold + 1) * fold_size if fold < n_splits - 1 else n_samples
        
        # Split indices
        val_indices = list(range(val_start, val_end))
        train_indices = list(range(0, val_start)) + list(range(val_end, n_samples))
        
        # Create training and validation sets
        X_train_fold = X[train_indices]
        y_train_fold = y[train_indices]
        X_val_fold = X[val_indices]
        y_val_fold = y[val_indices]
        
        # Create DMatrix objects
        dtrain_fold = dxgb.DaskDMatrix(client, X_train_fold, y_train_fold)
        dval_fold = dxgb.DaskDMatrix(client, X_val_fold, y_val_fold)
        
        # Train model
        output = dxgb.train(
            client=client,
            params=params,
            dtrain=dtrain_fold,
            num_boost_round=n_rounds,
            evals=[(dtrain_fold, 'train'), (dval_fold, 'val')],
            early_stopping_rounds=10,
            verbose_eval=False
        )
        
        # Get best score
        model = output['booster']
        val_score = model.best_score
        scores.append(val_score)
        print(f"Fold {fold + 1} AUC: {val_score:.4f}")
    
    return scores

# Run distributed CV
cv_scores = distributed_cv(client, X_da, y_da, params, n_splits=5)

print(f"\nCross-validation results:")
print(f"Mean AUC: {np.mean(cv_scores):.4f} ± {np.std(cv_scores):.4f}")
print(f"Individual scores: {[f'{score:.4f}' for score in cv_scores]}")

client.close()

Large-Scale Feature Engineering with Dask

from dask.distributed import Client
import dask.dataframe as dd
import xgboost.dask as dxgb
import numpy as np
import pandas as pd

# Setup
client = Client(processes=True, n_workers=4, memory_limit='2GB')

# Simulate large dataset loading
# In practice: dd.read_csv('huge_dataset.csv', blocksize='100MB')
large_df = pd.DataFrame({
    'feature_1': np.random.randn(1000000),
    'feature_2': np.random.randn(1000000),
    'feature_3': np.random.randn(1000000),
    'categorical_1': np.random.choice(['A', 'B', 'C', 'D'], 1000000),
    'categorical_2': np.random.choice(['X', 'Y', 'Z'], 1000000),
    'target': np.random.randint(0, 2, 1000000)
})

# Convert to Dask DataFrame with appropriate partitioning
ddf = dd.from_pandas(large_df, npartitions=100)

# Distributed feature engineering
def engineer_features(df):
    """Apply feature engineering transformations."""
    # Numerical transformations
    df['feature_1_squared'] = df['feature_1'] ** 2
    df['feature_2_log'] = np.log(np.abs(df['feature_2']) + 1)
    df['feature_interaction'] = df['feature_1'] * df['feature_2']
    
    # Categorical encoding (simple label encoding for demo)
    df['cat_1_encoded'] = df['categorical_1'].map({'A': 0, 'B': 1, 'C': 2, 'D': 3})
    df['cat_2_encoded'] = df['categorical_2'].map({'X': 0, 'Y': 1, 'Z': 2})
    
    return df

# Apply feature engineering
engineered_df = engineer_features(ddf)

# Select features for training
feature_cols = ['feature_1', 'feature_2', 'feature_3', 'feature_1_squared', 
               'feature_2_log', 'feature_interaction', 'cat_1_encoded', 'cat_2_encoded']

X_features = engineered_df[feature_cols]
y_target = engineered_df['target']

# Create train/test split (Dask DataFrames don't support positional row
# slicing with iloc, so use random_split instead)
train_ddf, test_ddf = engineered_df.random_split([0.8, 0.2], random_state=42)

X_train = train_ddf[feature_cols]
y_train = train_ddf['target']
X_test = test_ddf[feature_cols]
y_test = test_ddf['target']

# Create DaskDMatrix
dtrain = dxgb.DaskDMatrix(client, X_train, y_train)
dtest = dxgb.DaskDMatrix(client, X_test, y_test)

# Training parameters
params = {
    'objective': 'binary:logistic',
    'eval_metric': ['logloss', 'auc'],
    'max_depth': 8,
    'learning_rate': 0.1,
    'subsample': 0.8,
    'colsample_bytree': 0.8,
    'tree_method': 'hist',
    'max_bin': 256,
    'random_state': 42
}

# Distributed training
print("Starting distributed training on engineered features...")
output = dxgb.train(
    client=client,
    params=params,
    dtrain=dtrain,
    num_boost_round=100,
    evals=[(dtrain, 'train'), (dtest, 'test')],
    early_stopping_rounds=10,
    verbose_eval=10
)

model = output['booster']
print(f"Training completed at iteration {model.best_iteration}")
print(f"Best test score: {model.best_score:.4f}")

# Feature importance analysis
feature_importance = model.get_score(importance_type='gain')
print(f"\nTop 5 most important features:")
sorted_features = sorted(feature_importance.items(), key=lambda x: x[1], reverse=True)
for feature, importance in sorted_features[:5]:
    print(f"{feature}: {importance:.4f}")

client.close()

Install with Tessl CLI

npx tessl i tessl/pypi-xgboost-cpu
