CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-autogluon

AutoGluon automates machine learning tasks enabling you to easily achieve strong predictive performance in your applications.

Pending

Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Overview
Eval results
Files

docs/features.md

Feature Engineering

Comprehensive feature generation and transformation capabilities for automated feature engineering across different data types. AutoGluon's feature engineering system provides modular, composable feature generators that can handle text, categorical, numerical, and datetime data with intelligent preprocessing pipelines.

Capabilities

AutoML Pipeline Feature Generators

High-level feature generation pipelines that automatically select and configure appropriate feature transformations.

class AutoMLPipelineFeatureGenerator:
    def __init__(
        self,
        enable_numeric_features: bool = True,
        enable_categorical_features: bool = True,
        enable_datetime_features: bool = True,
        enable_text_special_features: bool = True,
        enable_text_ngram_features: bool = True,
        enable_raw_text_features: bool = False,
        enable_vision_features: bool = True,
        **kwargs
    ):
        """
        Initialize the automated feature generation pipeline.
        
        Each ``enable_*`` flag switches one family of transformations on or
        off; every family is enabled by default except raw text passthrough.
        
        Parameters:
        - enable_numeric_features: Generate numerical feature transformations
        - enable_categorical_features: Generate categorical encodings
        - enable_datetime_features: Generate datetime-based features
        - enable_text_special_features: Generate text special character features
        - enable_text_ngram_features: Generate text n-gram features
        - enable_raw_text_features: Keep raw text features in the output
        - enable_vision_features: Generate image-based features
        - kwargs: Extra options forwarded to the underlying pipeline
        """

    def fit_transform(self, X, y=None, **kwargs):
        """
        Fit the feature generators and transform the input data in one step.
        
        Must be called before ``transform``.
        
        Parameters:
        - X: Input DataFrame with raw features
        - y: Target values (optional)
        
        Returns:
        Transformed DataFrame with engineered features
        """

    def transform(self, X, **kwargs):
        """
        Transform input data using the already-fitted feature generators.
        
        Parameters:
        - X: Input DataFrame to transform (same columns as the data used to fit)
        
        Returns:
        Transformed DataFrame with engineered features
        """

class AutoMLInterpretablePipelineFeatureGenerator:
    def __init__(self, **kwargs):
        """
        Initialize the interpretable feature generation pipeline.
        
        Similar to AutoMLPipelineFeatureGenerator, but restricted to
        interpretable transformations suitable for model explanation.
        
        Parameters:
        - kwargs: Extra options forwarded to the underlying pipeline
        """

Core Feature Generators

Base classes and fundamental feature transformation components.

class AbstractFeatureGenerator:
    def __init__(self, **kwargs):
        """Base class for all feature generators; concrete generators subclass this."""
    
    def fit_transform(self, X, y=None, **kwargs):
        """Fit the generator to X (optionally using targets y) and return the transformed data in one step."""
    
    def fit(self, X, y=None, **kwargs):
        """Fit the feature generator to training data without transforming it."""
    
    def transform(self, X, **kwargs):
        """Transform X using the fitted generator; requires a prior fit or fit_transform."""

class PipelineFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, generators: list, **kwargs):
        """
        Chain multiple feature generators in sequence: each generator
        consumes the output of the previous one.
        
        Parameters:
        - generators: Ordered list of feature generator instances
        """

class BulkFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, generators: list, **kwargs):
        """
        Apply multiple feature generators in parallel: each generator
        receives the same input, and their outputs are combined.
        
        Parameters:
        - generators: List of feature generator instances
        """

Categorical Feature Processing

Feature generators for categorical data encoding and transformation.

class CategoryFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        cat_order: str = 'count',
        maximum_num_cat: int = 10000,
        verbosity: int = 0,
        **kwargs
    ):
        """
        Generate categorical features with label encoding.
        
        Parameters:
        - cat_order: Category ordering method ('count', 'alphabetic')
        - maximum_num_cat: Maximum number of distinct categories to process
        - verbosity: Logging verbosity level
        """

class OneHotEncoderFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        maximum_num_cat: int = 10,
        minimum_cat_count: int = 30,
        **kwargs
    ):
        """
        Generate one-hot encoded features for categorical data.
        
        Parameters:
        - maximum_num_cat: Maximum number of categories a column may have
          to be one-hot encoded
        - minimum_cat_count: Minimum category frequency for a category to
          get its own indicator column
        """

class LabelEncoderFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, verbosity: int = 0, **kwargs):
        """
        Generate label encoded (integer-coded) features for categorical data.
        
        Parameters:
        - verbosity: Logging verbosity level
        """

Numerical Feature Processing

Feature generators for numerical data transformation and binning.

class BinnedFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        num_bins: int = 10,
        quantile_bin: bool = True,
        **kwargs
    ):
        """
        Generate binned (discretized) features from numerical data.
        
        Parameters:
        - num_bins: Number of bins to create per numerical column
        - quantile_bin: If True use quantile-based bin edges; otherwise
          equal-width binning
        """

class NumericMemoryMinimizeFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Minimize memory usage of numerical features through dtype optimization
        (downcasting to smaller compatible dtypes).
        """

class CategoryMemoryMinimizeFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Minimize memory usage of categorical features through dtype optimization.
        """

Text Feature Processing

Feature generators specialized for text data processing and transformation.

class TextNgramFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        vectorizer_strategy: str = 'tf-idf',
        max_features: int = 10000,
        ngram_range: tuple = (1, 3),
        **kwargs
    ):
        """
        Generate n-gram features from text data.
        
        Parameters:
        - vectorizer_strategy: Vectorization method ('tf-idf', 'count')
        - max_features: Maximum number of n-gram features to generate
        - ngram_range: Inclusive range of n-gram sizes as (min_n, max_n)
        """

class TextSpecialFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Generate special character and text statistics features.
        
        Creates features such as text length, number of words, and
        special character counts.
        """

Datetime Feature Processing

Feature generators for datetime and temporal data transformation.

class DatetimeFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        features_to_extract: list = None,
        **kwargs
    ):
        """
        Generate datetime-based features from timestamp columns.
        
        Parameters:
        - features_to_extract: List of datetime features to extract.
          Options: ['year', 'month', 'day', 'dayofweek', 'hour', 'minute', 'second']
          NOTE(review): behavior when None is not specified here —
          presumably a default feature set is extracted; confirm.
        
        Generates features like:
        - Year, month, day components
        - Day of week, hour of day
        - Is weekend, is business hour
        - Cyclical encodings for periodic features
        """

Data Cleaning and Preprocessing

Feature generators for data cleaning and basic preprocessing operations.

class FillNaFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        inplace: bool = True,
        fillna_map: dict = None,
        **kwargs
    ):
        """
        Handle missing values through various imputation strategies.
        
        Parameters:
        - inplace: Modify the input features in place instead of copying
        - fillna_map: Optional mapping of column name -> custom fill value;
          columns not listed use the default strategy
        """

class DropUniqueFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Remove features with only one unique value (constant features),
        which carry no predictive signal.
        """

class DropDuplicatesFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Remove duplicate features (columns whose values are identical to
        another column's).
        """

class IsNanFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Generate binary indicator features marking where values are missing.
        """

Utility Feature Generators

Helper feature generators for type conversion and feature management.

class AsTypeFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        convert_map: dict,
        **kwargs
    ):
        """
        Convert feature data types.
        
        Parameters:
        - convert_map: Dictionary mapping column names to target dtypes
        """

class IdentityFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Pass-through generator that returns features unchanged; useful as a
        no-op stage in composed pipelines.
        """

class RenameFeatureGenerator(AbstractFeatureGenerator):
    def __init__(
        self,
        rename_map: dict,
        **kwargs
    ):
        """
        Rename features according to a mapping.
        
        Parameters:
        - rename_map: Dictionary mapping old column names to new names
        """

class DummyFeatureGenerator(AbstractFeatureGenerator):
    def __init__(self, **kwargs):
        """
        Placeholder generator for testing and debugging.
        """

Usage Examples

Basic Feature Engineering Pipeline

from autogluon.features import AutoMLPipelineFeatureGenerator
import pandas as pd

# Sample dataset with mixed data types (numeric, categorical, text, datetime)
df = pd.DataFrame({
    'numerical_col': [1.5, 2.3, 3.1, 4.7],
    'categorical_col': ['A', 'B', 'A', 'C'],
    'text_col': ['hello world', 'goodbye moon', 'hello again', 'farewell sun'],
    'datetime_col': pd.date_range('2023-01-01', periods=4, freq='D'),
    'target': [0, 1, 0, 1]
})

# Initialize automated feature generator with selected feature families enabled
feature_generator = AutoMLPipelineFeatureGenerator(
    enable_text_ngram_features=True,
    enable_datetime_features=True,
    enable_categorical_features=True
)

# Split out target, then fit the generator and transform in one step
X = df.drop('target', axis=1)
y = df['target']

X_transformed = feature_generator.fit_transform(X, y)
print(f"Original features: {X.shape[1]}")
print(f"Engineered features: {X_transformed.shape[1]}")
print(f"New columns: {list(X_transformed.columns)}")

# Transform new data with the already-fitted generator.
# NOTE: `new_data` is a placeholder for a DataFrame with the same columns as X.
X_new_transformed = feature_generator.transform(new_data)

Custom Feature Engineering Pipeline

from autogluon.features import (
    PipelineFeatureGenerator,
    DatetimeFeatureGenerator,
    CategoryFeatureGenerator,
    TextNgramFeatureGenerator,
    FillNaFeatureGenerator
)

# Build a custom sequential pipeline: each stage consumes the previous stage's output
custom_pipeline = PipelineFeatureGenerator([
    FillNaFeatureGenerator(),  # Handle missing values first
    DatetimeFeatureGenerator(
        features_to_extract=['year', 'month', 'dayofweek', 'hour']
    ),
    CategoryFeatureGenerator(maximum_num_cat=1000),
    TextNgramFeatureGenerator(
        max_features=5000,
        ngram_range=(1, 2),
        vectorizer_strategy='tf-idf'
    )
])

# Apply the custom pipeline.
# NOTE: `raw_data` (feature DataFrame) and `target_data` (targets) are placeholders.
X_custom = custom_pipeline.fit_transform(raw_data, target_data)

Specialized Text Processing

from autogluon.features import TextSpecialFeatureGenerator, TextNgramFeatureGenerator
from autogluon.features import BulkFeatureGenerator
import pandas as pd  # fix: `pd` was used below without being imported in this snippet

# Combine multiple text feature generators; both receive the same input
# and their outputs are combined.
text_features = BulkFeatureGenerator([
    TextSpecialFeatureGenerator(),  # Text statistics
    TextNgramFeatureGenerator(
        ngram_range=(1, 3),
        max_features=10000,
        vectorizer_strategy='tf-idf'
    )
])

# Sample text data to process
text_df = pd.DataFrame({
    'review_text': ['Great product!', 'Not bad', 'Excellent quality', 'Poor service'],
    'description': ['Short desc', 'Longer description here', 'Brief', 'Detailed info']
})

# Fit both generators and get the combined feature matrix
text_features_generated = text_features.fit_transform(text_df)
print(f"Generated {text_features_generated.shape[1]} text features")

Memory-Optimized Feature Processing

from autogluon.features import (
    AutoMLPipelineFeatureGenerator,
    NumericMemoryMinimizeFeatureGenerator,
    CategoryMemoryMinimizeFeatureGenerator,
    PipelineFeatureGenerator
)

# Memory-optimized sequential pipeline for large datasets:
# generate features first, then downcast numeric and categorical dtypes.
memory_optimized = PipelineFeatureGenerator([
    AutoMLPipelineFeatureGenerator(),
    NumericMemoryMinimizeFeatureGenerator(),
    CategoryMemoryMinimizeFeatureGenerator()
])

# Process a large dataset with memory optimization.
# NOTE: `large_dataset` is a placeholder for your input DataFrame.
large_data_processed = memory_optimized.fit_transform(large_dataset)
# fix: removed extraneous f-prefix from a string with no placeholders
print("Memory usage reduced by dtype optimization")

Install with Tessl CLI

npx tessl i tessl/pypi-autogluon

docs

core.md

features.md

index.md

multimodal.md

tabular.md

timeseries.md

tile.json