LightGBM is a gradient boosting framework that uses tree-based learning algorithms. It is designed to be distributed and efficient, offering faster training speed, lower memory usage, better accuracy, and support for parallel, distributed, and GPU learning.
Low-level LightGBM interface providing direct access to the gradient boosting engine. This interface enables advanced model control, custom objectives, evaluation functions, and fine-tuned training procedures for users who need maximum flexibility.
The core Booster class provides direct access to LightGBM's gradient boosting engine with full control over training parameters and model behavior.
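For orientation before the full reference below, here is a minimal sketch of driving a Booster directly, one boosting round at a time (the data and parameter values are illustrative):
import lightgbm as lgb
import numpy as np

# Illustrative in-memory data
X_train = np.random.rand(500, 10)
y_train = np.random.rand(500)
train_set = lgb.Dataset(X_train, label=y_train)

booster = lgb.Booster(params={'objective': 'regression', 'verbose': -1},
                      train_set=train_set)

# Advance the model manually instead of calling lgb.train()
for _ in range(10):
    booster.update()  # one boosting round against train_set

print(booster.current_iteration())  # 10
print(booster.num_trees())          # one tree per iteration for single-output regression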
class Booster:
"""
Core LightGBM model class for advanced training and prediction control.
"""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None):
"""
Initialize Booster object.
Parameters:
- params: dict or None - Training parameters
- train_set: Dataset or None - Training dataset
- model_file: str or None - Path to model file to load
- model_str: str or None - Model string to load from
"""
def add_valid(self, data, name):
"""
Add validation dataset.
Parameters:
- data: Dataset - Validation dataset
- name: str - Name for the validation set
"""
def current_iteration(self):
"""
Get current iteration index.
Returns:
- int: Current iteration number
"""
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""
Export model structure to JSON format.
Parameters:
- num_iteration: int or None - Number of iterations to export
- start_iteration: int - Starting iteration to export
- importance_type: str - Feature importance type ('split', 'gain')
Returns:
- dict: Model structure in JSON format
"""
def eval(self, data, name, feval=None):
"""
Evaluate model on given dataset.
Parameters:
- data: Dataset - Dataset to evaluate on
- name: str - Name of the dataset
- feval: callable or None - Custom evaluation function
Returns:
- list: Evaluation results
"""
def eval_train(self, feval=None):
"""Evaluate model on training data."""
def eval_valid(self, feval=None):
"""Evaluate model on validation data."""
def feature_importance(self, importance_type='split', iteration=None):
"""
Get feature importance scores.
Parameters:
- importance_type: str - Type of importance ('split', 'gain')
- iteration: int or None - Iteration to get importance for
Returns:
- numpy.ndarray: Feature importance scores
"""
def feature_name(self):
"""
Get feature names.
Returns:
- list: Feature names
"""
def free_dataset(self):
"""Free dataset memory."""
def get_leaf_output(self, tree_id, leaf_id):
"""
Get leaf output value.
Parameters:
- tree_id: int - Tree index
- leaf_id: int - Leaf index
Returns:
- float: Leaf output value
"""
def set_leaf_output(self, tree_id, leaf_id, val):
"""
Set leaf output value.
Parameters:
- tree_id: int - Tree index
- leaf_id: int - Leaf index
- val: float - New leaf value
"""
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""
Get split value histogram for a feature.
Parameters:
- feature: int or str - Feature index or name
- bins: int or None - Number of histogram bins
- xgboost_style: bool - Whether to use XGBoost-style binning
Returns:
- tuple: (bin_edges, bin_counts)
"""
def lower_bound(self):
"""Get prediction lower bound."""
def upper_bound(self):
"""Get prediction upper bound."""
def model_from_string(self, model_str):
"""
Load model from string representation.
Parameters:
- model_str: str - String representation of model
"""
def model_to_string(self, num_iteration=None, start_iteration=0):
"""
Export model to string representation.
Parameters:
- num_iteration: int or None - Number of iterations to export
- start_iteration: int - Starting iteration to export
Returns:
- str: String representation of model
"""
def num_feature(self):
"""
Get number of features.
Returns:
- int: Number of features
"""
def num_model_per_iteration(self):
"""
Get number of models per iteration (1 for regression and binary classification, the number of classes for multiclass).
Returns:
- int: Number of models per iteration
"""
def num_trees(self):
"""
Get total number of trees.
Returns:
- int: Total number of trees
"""
def predict(self, data, start_iteration=0, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""
Make predictions on data.
Parameters:
- data: array-like, Dataset, or str - Input data or filename
- start_iteration: int - Starting iteration for prediction
- num_iteration: int or None - Number of iterations to use
- pred_leaf: bool - Whether to predict leaf indices
- pred_contrib: bool - Whether to predict feature contributions
Returns:
- numpy.ndarray: Predictions
"""
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""
Refit the existing tree structure to new data.
Parameters:
- data: array-like - New training data
- label: array-like - New training labels
- decay_rate: float - Mixing weight: refit leaf output = decay_rate * old output + (1 - decay_rate) * new output
Returns:
- Booster: Refitted model
"""
def reset_parameter(self, params):
"""
Reset model parameters.
Parameters:
- params: dict - New parameters to set
"""
def rollback_one_iter(self):
"""Rollback one iteration."""
def save_model(self, filename, num_iteration=None, start_iteration=0):
"""
Save model to file.
Parameters:
- filename: str - Output filename
- num_iteration: int or None - Number of iterations to save
- start_iteration: int - Starting iteration to save
"""
def set_network(self, machines, local_listen_port=12400,
listen_time_out=120, num_machines=1):
"""
Setup distributed training network.
Parameters:
- machines: str - Machine list for distributed training
- local_listen_port: int - Local listening port
- listen_time_out: int - Listen timeout in seconds
- num_machines: int - Number of machines
"""
def free_network(self):
"""Free network resources."""
def set_train_data_name(self, name):
"""
Set training data name.
Parameters:
- name: str - Training data name
"""
def shuffle_models(self, start_iter=0, end_iter=-1):
"""
Shuffle model order.
Parameters:
- start_iter: int - Starting iteration
- end_iter: int - Ending iteration (-1 for all)
"""
def trees_to_dataframe(self):
"""
Convert trees to pandas DataFrame format.
Returns:
- pandas.DataFrame: Tree structure as DataFrame
"""
def update(self, train_set=None, fobj=None):
"""
Update model for one iteration.
Parameters:
- train_set: Dataset or None - Training dataset
- fobj: callable or None - Custom objective function
Returns:
- bool: True if updated successfully
"""The Dataset class provides efficient data handling and preprocessing capabilities for LightGBM training.
class Dataset:
"""
LightGBM dataset wrapper for efficient data handling and preprocessing.
"""
def __init__(self, data, label=None, reference=None, weight=None, group=None,
init_score=None, feature_name='auto', categorical_feature='auto',
params=None, free_raw_data=True, position=None):
"""
Initialize Dataset object.
Parameters:
- data: array-like, pandas DataFrame, or str - Input data or filename
- label: array-like or None - Target values
- reference: Dataset or None - Reference dataset for validation
- weight: array-like or None - Sample weights
- group: array-like or None - Group/query sizes for ranking
- init_score: array-like or None - Initial prediction scores
- feature_name: list or 'auto' - Feature names
- categorical_feature: list or 'auto' - Categorical feature indices/names
- params: dict or None - Dataset parameters
- free_raw_data: bool - Whether to free raw data after construction
- position: array-like or None - Position information
"""
def add_features_from(self, other):
"""
Add features from another dataset.
Parameters:
- other: Dataset - Source dataset for additional features
"""
def construct(self):
"""
Build the Dataset (lazy initialization happens here).
Returns:
- Dataset: Constructed Dataset object (self)
"""
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, position=None, **kwargs):
"""
Create validation dataset with same parameters.
Parameters:
- data: array-like - Validation data
- label: array-like or None - Validation labels
- weight: array-like or None - Validation sample weights
- group: array-like or None - Validation group sizes
- init_score: array-like or None - Validation initial scores
- position: array-like or None - Validation position info
Returns:
- Dataset: Validation dataset object
"""
def feature_num_bin(self, feature):
"""
Get number of bins for a feature.
Parameters:
- feature: int or str - Feature index or name
Returns:
- int: Number of bins for the feature
"""
def get_data(self):
"""
Get raw data reference.
Returns:
- Reference to raw data
"""
def get_field(self, field_name):
"""
Get dataset field value.
Parameters:
- field_name: str - Field name ('label', 'weight', 'group', etc.)
Returns:
- Field value
"""
def get_feature_name(self):
"""
Get feature names.
Returns:
- list: Feature names
"""
def get_group(self):
"""Get group field."""
def get_init_score(self):
"""Get initial score field."""
def get_label(self):
"""Get label field."""
def get_position(self):
"""Get position field."""
def get_weight(self):
"""Get weight field."""
def get_ref_chain(self, ref_limit=100):
"""
Get reference dataset chain.
Parameters:
- ref_limit: int - Maximum reference chain length
Returns:
- list: Reference dataset chain
"""
def num_data(self):
"""
Get number of data points.
Returns:
- int: Number of data points
"""
def num_feature(self):
"""
Get number of features.
Returns:
- int: Number of features
"""
def save_binary(self, filename):
"""
Save dataset in binary format.
Parameters:
- filename: str - Output filename
"""
def set_categorical_feature(self, categorical_feature):
"""
Set categorical features.
Parameters:
- categorical_feature: list - Categorical feature indices/names
"""
def set_feature_name(self, feature_name):
"""
Set feature names.
Parameters:
- feature_name: list - Feature names
"""
def set_field(self, field_name, data):
"""
Set dataset field value.
Parameters:
- field_name: str - Field name
- data: array-like - Field data
"""
def set_group(self, group):
"""Set group field."""
def set_init_score(self, init_score):
"""Set initial score field."""
def set_label(self, label):
"""Set label field."""
def set_position(self, position):
"""Set position field."""
def set_weight(self, weight):
"""Set weight field."""
def set_reference(self, reference):
"""
Set reference dataset.
Parameters:
- reference: Dataset - Reference dataset
"""
def subset(self, used_indices, **kwargs):
"""
Create dataset subset.
Parameters:
- used_indices: array-like - Indices to include in subset
Returns:
- Dataset: Subset dataset
"""High-level training functions that provide convenient interfaces for model training and cross-validation.
def train(params, train_set, num_boost_round=100, valid_sets=None,
valid_names=None, feval=None, init_model=None, feature_name='auto',
categorical_feature='auto', keep_training_booster=False, callbacks=None):
"""
Train LightGBM model with specified parameters.
Parameters:
- params: dict - Training parameters
- train_set: Dataset - Training dataset
- num_boost_round: int - Number of boosting iterations
- valid_sets: list or None - List of validation datasets
- valid_names: list or None - Names for validation sets
- feval: callable or None - Custom evaluation function
- init_model: str, Booster, or None - Initial model for continued training
- feature_name: list or 'auto' - Feature names
- categorical_feature: list or 'auto' - Categorical features
- keep_training_booster: bool - Whether to keep training booster
- callbacks: list or None - List of callback functions
Returns:
- Booster: Trained model
"""
def cv(params, train_set, num_boost_round=100, folds=None, nfold=5,
stratified=True, shuffle=True, metrics=None, feval=None, init_model=None,
fpreproc=None, feature_name='auto', categorical_feature='auto',
seed=0, callbacks=None, eval_train_metric=False, return_cvbooster=False):
"""
Perform k-fold cross-validation.
Parameters:
- params: dict - Training parameters
- train_set: Dataset - Training dataset
- num_boost_round: int - Number of boosting iterations
- folds: generator or None - Custom cross-validation generator
- nfold: int - Number of CV folds
- stratified: bool - Whether to use stratified CV
- shuffle: bool - Whether to shuffle data before splitting
- metrics: str, list, or None - Evaluation metrics
- feval: callable or None - Custom evaluation function
- init_model: str, Booster, or None - Initial model
- fpreproc: callable or None - Preprocessing function
- feature_name: list or 'auto' - Feature names
- categorical_feature: list or 'auto' - Categorical features
- seed: int - Random seed for CV splits
- callbacks: list or None - List of callback functions
- eval_train_metric: bool - Whether to evaluate training metric
- return_cvbooster: bool - Whether to return CVBooster object
Returns:
- dict or CVBooster: CV results dictionary or CVBooster object
"""
class CVBooster:
"""
Container for cross-validation boosters and results.
"""
def __init__(self, model_file=None):
"""
Initialize CVBooster object.
Parameters:
- model_file: str or None - Model file to load from
"""
def model_from_string(self, model_str):
"""
Load CVBooster from string representation.
Parameters:
- model_str: str - String representation
"""
def model_to_string(self):
"""
Export CVBooster to string representation.
Returns:
- str: String representation
"""
def save_model(self, filename, num_iteration=None):
"""
Save CVBooster to file.
Parameters:
- filename: str - Output filename
- num_iteration: int or None - Number of iterations to save
"""
@property
def boosters(self):
"""List of trained booster objects for each fold."""
@property
def best_iteration(self):
"""Best iteration number across all folds."""Abstract base class for implementing custom data sources.
class Sequence:
"""
Generic data access interface for custom data sources.
This abstract base class allows you to implement custom data loading
for scenarios where data cannot fit in memory or needs special handling.
"""
batch_size = 4096 # Default batch size
def __getitem__(self, idx):
"""
Abstract method for data access by index.
Parameters:
- idx: int - Data index
Returns:
- Data item at the specified index
"""
raise NotImplementedError()
def __len__(self):
"""
Abstract method returning sequence length.
Returns:
- int: Total number of items in sequence
"""
raise NotImplementedError()
Additional utilities for logging and error handling.
def register_logger(logger, info_method_name="info", warning_method_name="warning"):
"""
Register custom logger for LightGBM messages.
Parameters:
- logger: Logger object - Custom logger instance
- info_method_name: str - Name of info logging method
- warning_method_name: str - Name of warning logging method
"""
class LightGBMError(Exception):
"""Custom exception for LightGBM-specific errors."""
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning for LightGBM."""import lightgbm as lgb
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
# Load and prepare data
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Create LightGBM datasets
train_data = lgb.Dataset(X_train, label=y_train)
test_data = lgb.Dataset(X_test, label=y_test, reference=train_data)
# Set parameters
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'boosting_type': 'gbdt',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'verbose': -1
}
# Train model
model = lgb.train(
params,
train_data,
num_boost_round=100,
valid_sets=[test_data],
valid_names=['test'],
callbacks=[lgb.early_stopping(10), lgb.log_evaluation(20)]
)
# Make predictions
predictions = model.predict(X_test)
binary_predictions = (predictions > 0.5).astype(int)
print(f"Accuracy: {(binary_predictions == y_test).mean():.4f}")
print(f"Feature importance: {model.feature_importance()[:5]}")import lightgbm as lgb
import numpy as np
from sklearn.datasets import load_diabetes
# Load data
X, y = load_diabetes(return_X_y=True)
train_data = lgb.Dataset(X, label=y)
# Set parameters
params = {
'objective': 'regression',
'metric': 'rmse',
'boosting_type': 'gbdt',
'num_leaves': 31,
'learning_rate': 0.05,
'verbose': -1
}
# Perform cross-validation
cv_results = lgb.cv(
params,
train_data,
num_boost_round=100,
nfold=5,
stratified=False,
shuffle=True,
seed=42,
return_cvbooster=True,
callbacks=[lgb.early_stopping(10), lgb.log_evaluation(20)]
)
print(f"Best CV score: {cv_results['valid rmse-mean'][-1]:.4f}")
print(f"Best iteration: {len(cv_results['valid rmse-mean'])}")
# Access individual fold models via the returned CVBooster
cvbooster = cv_results['cvbooster']  # key present because return_cvbooster=True
print(f"Number of fold models: {len(cvbooster.boosters)}")import lightgbm as lgb
import numpy as np
from sklearn.datasets import make_regression
# Create sample data
X, y = make_regression(n_samples=1000, n_features=10, random_state=42)
train_data = lgb.Dataset(X, label=y)
def custom_objective(preds, train_data):
    """Custom objective (L1 loss): gradient and Hessian w.r.t. raw scores."""
    residual = preds - train_data.get_label()
    grad = np.sign(residual)
    hess = np.ones_like(residual)  # true L1 Hessian is 0; ones are the usual stand-in
    return grad, hess

def custom_eval(preds, eval_data):
    """Custom evaluation function: mean absolute error."""
    mae = np.mean(np.abs(preds - eval_data.get_label()))
    return 'mae', mae, False  # (eval_name, eval_result, is_higher_better)

# A callable objective is passed through params (train() has no fobj argument);
# feval is evaluated on valid_sets during training
model = lgb.train(
    {'objective': custom_objective, 'verbose': -1},
    train_data,
    num_boost_round=100,
    valid_sets=[train_data],
    feval=custom_eval,
    callbacks=[lgb.log_evaluation(20)]
)
predictions = model.predict(X)
print(f"Custom MAE: {np.mean(np.abs(predictions - y)):.4f}")Install with Tessl CLI
npx tessl i tessl/pypi-lightgbm