CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-boltons

When they're not builtins, they're boltons.

Pending

Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Overview
Eval results
Files

caching.mddocs/

Caching

Multiple cache implementations with different eviction strategies, function/method decorators, and cache key generation utilities. Supports LRU (Least Recently Used), LRI (Least Recently Inserted) strategies with hit/miss statistics, property caching, and advanced function caching with scoping and type-aware keys.

Capabilities

Cache Data Structures

High-performance cache implementations with different eviction strategies.

class LRI(dict):
    """
    Least Recently Inserted cache strategy.
    Dict subtype with hit/miss statistics and size limits.
    """
    def __init__(self, max_size=DEFAULT_MAX_SIZE): ...
    @property
    def hit_count(self): ...
    @property  
    def miss_count(self): ...
    @property
    def soft_miss_count(self): ...
    def get_stats(self): ...

class LRU(LRI):
    """
    Least Recently Used cache strategy.
    Extends LRI with access-based ordering for optimal cache performance.
    """
    def __getitem__(self, key): ...
    def get(self, key, default=None): ...

class CachedFunction:
    """Wrapper for functions with caching logic."""
    def __init__(self, cache, func, scoped, typed, key_func): ...
    def __call__(self, *args, **kwargs): ...
    def cache_clear(self): ...
    def cache_info(self): ...

class CachedMethod:
    """Wrapper for methods with caching logic."""
    def __init__(self, cache, func, scoped, typed, key_func): ...
    def __get__(self, obj, objtype): ...

class ThresholdCounter:
    """
    Bounded dict-like mapping with automatic compaction based on thresholds.
    """
    def __init__(self, threshold=DEFAULT_MAX_SIZE, **kwargs): ...
    def __setitem__(self, key, value): ...
    def __getitem__(self, key): ...
    def threshold_check(self): ...

class MinIDMap:
    """
    Assigns weakref-able objects the smallest possible unique integer IDs.
    """
    def __init__(self): ...
    def get(self, obj): ...
    def drop(self, obj): ...

Function and Method Caching

Decorators for caching function and method results.

def cached(cache, scoped=True, typed=False, key=None):
    """
    Decorator to cache any function.
    
    Parameters:
    - cache: Cache object to store results
    - scoped (bool): Include function identity in cache key
    - typed (bool): Include argument types in cache key  
    - key (callable, optional): Custom key generation function
    
    Returns:
    callable: Cached version of the function
    """

def cachedmethod(cache, scoped=True, typed=False, key=None):
    """
    Decorator to cache methods.
    
    Parameters:
    - cache: Cache object or attribute name on instance
    - scoped (bool): Include method identity in cache key
    - typed (bool): Include argument types in cache key
    - key (callable, optional): Custom key generation function
    
    Returns:
    callable: Cached version of the method
    """

Property Caching

Property descriptor that caches computed values.

class cachedproperty:
    """
    Property decorator that caches the result after first access.
    
    Like @property, but caches the result after first computation,
    making subsequent accesses fast for expensive computations.
    """
    def __init__(self, func): ...
    def __get__(self, obj, objtype=None): ...
    def __set__(self, obj, value): ...
    def __delete__(self, obj): ...

Cache Key Generation

Utilities for generating cache keys from function arguments.

def make_cache_key(args, kwargs, typed=False):
    """
    Generate cache keys from function arguments.
    
    Parameters:
    - args (tuple): Positional arguments
    - kwargs (dict): Keyword arguments  
    - typed (bool): Include argument types in key
    
    Returns:
    Hashable cache key
    """

Usage Examples

from boltons.cacheutils import LRU, cached, cachedmethod, cachedproperty

# LRU Cache for direct use
cache = LRU(max_size=128)
cache['expensive_key'] = expensive_computation()
result = cache.get('expensive_key', 'default')

# Check cache statistics
stats = cache.get_stats()
print(f"Hits: {stats.hit_count}, Misses: {stats.miss_count}")

# Function caching decorator
@cached(LRU(max_size=256))
def fibonacci(n):
    if n < 2:
        return n
    return fibonacci(n-1) + fibonacci(n-2)

# Method caching with instance-level cache
class DataProcessor:
    def __init__(self):
        self.cache = LRU(max_size=100)
    
    @cachedmethod('cache')
    def process_data(self, data_id):
        # Expensive data processing
        return expensive_data_processing(data_id)

# Property caching for expensive computations
class ExpensiveCalculation:
    @cachedproperty
    def expensive_result(self):
        # This computation only happens once
        return sum(range(1000000))

# Custom cache key generation
@cached(LRU(), typed=True)  # Include types in cache key
def typed_function(x, y):
    return x + y

# Clear cache when needed
fibonacci.cache_clear()
processor = DataProcessor()
processor.cache.clear()  # clear the instance-level cache directly

Advanced Caching Patterns

from boltons.cacheutils import ThresholdCounter, MinIDMap

# ThresholdCounter for automatic cleanup
counter = ThresholdCounter(threshold=1000)
for i in range(1500):
    counter[f'key_{i}'] = i
# Automatically compacts when threshold exceeded

# MinIDMap for object ID tracking
id_map = MinIDMap()
obj1 = SomeObject()
obj2 = SomeObject()
id1 = id_map.get(obj1)  # Returns smallest available ID
id2 = id_map.get(obj2)  # Returns next smallest ID

# IDs are reused when objects are dropped
id_map.drop(obj1)  # release obj1's ID while the name is still bound
del obj1
obj3 = SomeObject()
id3 = id_map.get(obj3)  # Reuses id1

Custom Cache Implementation

from boltons.cacheutils import cached, make_cache_key

# Custom cache with logging
class LoggingCache(dict):
    def __init__(self, max_size=128):
        super().__init__()
        self.max_size = max_size
        self.access_count = 0
    
    def __getitem__(self, key):
        self.access_count += 1
        print(f"Cache access #{self.access_count}: {key}")
        return super().__getitem__(key)
    
    def __setitem__(self, key, value):
        if len(self) >= self.max_size:
            # Simple eviction: remove first item
            del self[next(iter(self))]
        super().__setitem__(key, value)

# Use custom cache with cached decorator
@cached(LoggingCache(max_size=50))
def tracked_function(x):
    return x ** 2

# Custom key function for complex scenarios
# (key functions are called with args, kwargs, and the typed flag)
def custom_key_func(args, kwargs, typed=False):
    # Only cache based on first argument
    return args[0] if args else None

@cached(LRU(), key=custom_key_func)
def selective_cache(important_arg, ignored_arg):
    return expensive_operation(important_arg, ignored_arg)

Types

# Constants
DEFAULT_MAX_SIZE = 128

# Cache statistics namedtuple (returned by get_stats())
CacheStats = namedtuple('CacheStats', ['hit_count', 'miss_count', 'soft_miss_count', 'size'])

Install with Tessl CLI

npx tessl i tessl/pypi-boltons

docs

additional-utilities.md

caching.md

data-structures.md

development-debugging-tools.md

file-io-operations.md

format-table-utilities.md

index.md

iteration-processing.md

math-stats-operations.md

network-url-handling.md

string-text-processing.md

time-date-utilities.md

tile.json