CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-xorbits

Scalable Python data science, in an API compatible & lightning fast way.

Overview
Eval results
Files

docs/machine-learning.md

Machine Learning

Distributed machine learning capabilities through sklearn, XGBoost, and LightGBM integrations. Xorbits enables scalable model training and prediction on large datasets that exceed single-machine memory.

Capabilities

Scikit-learn Integration

Complete sklearn API with distributed computing capabilities across all major machine learning algorithms and utilities.

# Core sklearn submodules - all classes and functions available via dynamic import
from xorbits.sklearn import cluster        # Clustering algorithms
from xorbits.sklearn import datasets       # Dataset loading utilities  
from xorbits.sklearn import decomposition  # Matrix decomposition algorithms
from xorbits.sklearn import ensemble       # Ensemble methods
from xorbits.sklearn import linear_model   # Linear models
from xorbits.sklearn import metrics        # Model evaluation metrics
from xorbits.sklearn import model_selection # Model selection and validation
from xorbits.sklearn import neighbors      # Nearest neighbors algorithms
from xorbits.sklearn import preprocessing  # Data preprocessing
from xorbits.sklearn import semi_supervised # Semi-supervised learning

Example clustering algorithm:

class KMeans:
    """
    K-Means clustering algorithm with distributed computing support.

    Provides the same API as sklearn.cluster.KMeans but enables
    clustering of large datasets across multiple workers.

    NOTE(review): the concrete implementation is resolved via dynamic
    import from ``xorbits.sklearn.cluster`` (see the submodule listing
    above); only the contract is documented here.
    """

XGBoost Integration

XGBoost distributed training and prediction with Xorbits data structures.

# XGBoost classes and functions available via dynamic import
class DMatrix:
    """
    Data matrix for XGBoost with distributed computing support.

    Equivalent to xgboost.DMatrix but works with Xorbits
    distributed arrays and DataFrames.

    NOTE(review): resolved via dynamic import from ``xorbits.xgboost``;
    construction arguments are presumed to mirror ``xgboost.DMatrix``
    (data plus an optional ``label=``) — confirm against the xorbits docs.
    """

def train(params: dict, dtrain, **kwargs):
    """
    Train XGBoost model with distributed data.

    Parameters:
    - params: dict, XGBoost booster parameters (e.g. ``objective``, ``eta``)
    - dtrain: DMatrix, training data built from Xorbits structures
    - **kwargs: Additional training parameters forwarded to the backend
      (e.g. ``num_boost_round``, ``evals``, ``early_stopping_rounds``,
      as shown in the usage example below)

    Returns:
    - Trained XGBoost model
    """

def predict(model, dtest, **kwargs):
    """
    Make predictions with XGBoost model.

    Parameters:
    - model: Trained XGBoost model (as returned by ``train``)
    - dtest: DMatrix, test data
    - **kwargs: Additional prediction parameters forwarded to the backend

    Returns:
    - Predictions array (deferred; materialized by ``xorbits.run`` —
      see the usage example below)
    """

LightGBM Integration

LightGBM distributed training with Xorbits data structures.

# LightGBM classes and functions available via dynamic import
class Dataset:
    """
    LightGBM dataset with distributed computing support.

    Equivalent to lightgbm.Dataset but works with Xorbits
    distributed arrays and DataFrames.

    NOTE(review): resolved via dynamic import from ``xorbits.lightgbm``;
    construction presumably accepts ``label=`` and ``reference=`` as in
    ``lightgbm.Dataset`` (see the usage example below) — confirm.
    """

def train(params: dict, train_set, **kwargs):
    """
    Train LightGBM model with distributed data.

    Parameters:
    - params: dict, LightGBM parameters (e.g. ``objective``, ``num_leaves``)
    - train_set: Dataset, training data built from Xorbits structures
    - **kwargs: Additional training parameters forwarded to the backend
      (e.g. ``valid_sets``, ``num_boost_round``, ``callbacks``, as shown
      in the usage example below)

    Returns:
    - Trained LightGBM model
    """

Usage Examples

Scikit-learn Examples

# End-to-end distributed sklearn workflow with Xorbits.
import xorbits
import xorbits.pandas as pd
import xorbits.numpy as np
from xorbits.sklearn.cluster import KMeans
from xorbits.sklearn.model_selection import train_test_split
from xorbits.sklearn.preprocessing import StandardScaler
from xorbits.sklearn.linear_model import LogisticRegression
from xorbits.sklearn.metrics import accuracy_score

xorbits.init()

# Load large dataset as a distributed DataFrame
data = pd.read_csv('large_dataset.csv')
X = data.drop('target', axis=1)
y = data['target']

# Preprocessing with distributed computing
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42
)

# Clustering
kmeans = KMeans(n_clusters=10, random_state=42)
clusters = kmeans.fit_predict(X_train)

# Classification: LogisticRegression.predict returns class labels, so
# accuracy_score applies directly. (The previous example thresholded
# LinearRegression output at 0.5, which mixes a regression model with a
# classification metric.)
clf = LogisticRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)

# Evaluate model
accuracy = accuracy_score(y_test, predictions)

# Trigger deferred execution of all three results
results = xorbits.run(clusters, predictions, accuracy)

xorbits.shutdown()

XGBoost Examples

# Distributed XGBoost training and prediction with Xorbits.
import xorbits
import xorbits.pandas as pd
import xorbits.xgboost as xgb

xorbits.init()

# Read both splits as distributed DataFrames
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')

# Separate feature columns from the label
features_train = train_df.drop('target', axis=1)
labels_train = train_df['target']
features_test = test_df.drop('target', axis=1)
labels_test = test_df['target']

# Wrap the distributed data in XGBoost matrices
dtrain = xgb.DMatrix(features_train, label=labels_train)
dtest = xgb.DMatrix(features_test, label=labels_test)

# Booster configuration
params = dict(
    objective='binary:logistic',
    max_depth=6,
    eta=0.3,
    subsample=0.8,
    colsample_bytree=0.8,
)

# Distributed training, early-stopped on the held-out matrix
model = xgb.train(
    params,
    dtrain,
    num_boost_round=100,
    evals=[(dtest, 'test')],
    early_stopping_rounds=10
)

# Deferred prediction on the test matrix
predictions = xgb.predict(model, dtest)

# Force evaluation of the lazy computation graph
computed_predictions = xorbits.run(predictions)

xorbits.shutdown()

LightGBM Examples

# Distributed LightGBM training and prediction with Xorbits.
import xorbits
import xorbits.pandas as pd
import xorbits.lightgbm as lgb

xorbits.init()

# Read both splits as distributed DataFrames
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')

# Separate feature columns from the label
features_train = train_df.drop('target', axis=1)
labels_train = train_df['target']
features_test = test_df.drop('target', axis=1)
labels_test = test_df['target']

# Build LightGBM datasets; the validation set references the training set
train_dataset = lgb.Dataset(features_train, label=labels_train)
test_dataset = lgb.Dataset(features_test, label=labels_test, reference=train_dataset)

# Booster configuration
params = dict(
    objective='binary',
    metric='binary_logloss',
    num_leaves=31,
    learning_rate=0.05,
    feature_fraction=0.9,
)

# Distributed training, early-stopped via callback on the validation set
model = lgb.train(
    params,
    train_dataset,
    valid_sets=[test_dataset],
    num_boost_round=100,
    callbacks=[lgb.early_stopping(10)]
)

# Deferred prediction on the raw test features
predictions = model.predict(features_test)

# Force evaluation of the lazy computation graph
computed_predictions = xorbits.run(predictions)

xorbits.shutdown()

Advanced ML Pipeline Example

# Distributed preprocessing + model-selection pipeline with Xorbits.
import xorbits
import xorbits.pandas as pd
from xorbits.sklearn.pipeline import Pipeline
from xorbits.sklearn.preprocessing import StandardScaler, OneHotEncoder
from xorbits.sklearn.compose import ColumnTransformer
from xorbits.sklearn.ensemble import RandomForestClassifier
from xorbits.sklearn.model_selection import GridSearchCV, cross_val_score

xorbits.init()

# Load large dataset as a distributed DataFrame
frame = pd.read_csv('large_ml_dataset.csv')
X = frame.drop('target', axis=1)
y = frame['target']

# Column groups by dtype: scale numerics, one-hot encode categoricals
num_cols = X.select_dtypes(include=['int64', 'float64']).columns
cat_cols = X.select_dtypes(include=['object']).columns

preprocessor = ColumnTransformer(transformers=[
    ('num', StandardScaler(), num_cols),
    ('cat', OneHotEncoder(drop='first'), cat_cols),
])

# Preprocessing feeds a random-forest classifier in one pipeline
pipeline = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('classifier', RandomForestClassifier(random_state=42)),
])

# Search space over forest hyperparameters (pipeline step prefix required)
param_grid = {
    'classifier__n_estimators': [100, 200],
    'classifier__max_depth': [10, 20, None],
    'classifier__min_samples_split': [2, 5],
}

grid_search = GridSearchCV(
    pipeline,
    param_grid,
    cv=5,
    scoring='accuracy',
    n_jobs=-1
)

# Fit the whole grid with distributed computing
grid_search.fit(X, y)

# Re-score the winning estimator with 5-fold cross-validation
cv_scores = cross_val_score(grid_search.best_estimator_, X, y, cv=5)

# Force evaluation of the deferred results
results = xorbits.run(grid_search.best_params_, cv_scores)

xorbits.shutdown()

Install with Tessl CLI

npx tessl i tessl/pypi-xorbits

docs

configuration.md

datasets.md

index.md

machine-learning.md

numpy-integration.md

pandas-integration.md

remote-computing.md

runtime-management.md

tile.json