A pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms.
—
Performance optimization system for pyFFTW interfaces that caches FFTW objects to avoid repeated planning overhead. When enabled, this cache stores temporarily created FFTW objects from interface function calls, providing significant performance improvements for repeated transforms with similar parameters.
# Example: basic cache usage with the numpy_fft interface.
from pyfftw.interfaces import cache, numpy_fft
import numpy as np

# Enable caching for better performance with repeated transforms
cache.enable()

# Create sample data
x = np.random.randn(1024) + 1j * np.random.randn(1024)

# First call - creates and caches FFTW object
y1 = numpy_fft.fft(x)  # Slower - initial planning

# Second call with equivalent array - uses cached object
y2 = numpy_fft.fft(x)  # Much faster - from cache

# Configure cache behavior
cache.set_keepalive_time(1.0)  # Keep objects alive for 1 second

# Check cache status
if cache.is_enabled():
    print("Cache is active")

# Disable when done
cache.disable()

The cache system addresses the overhead of creating FFTW objects in interface functions:
Note: For very small transforms, cache lookup overhead may exceed transform time. In such cases, consider using the FFTW class directly.
Functions to enable, disable, and configure the caching system.
def enable():
    """
    Enable the interface cache system.

    Enables caching of FFTW objects created during interface function
    calls. Spawns a background thread to manage cached objects and
    their lifetimes.

    Raises:
        ImportError: If threading is not available on the system
    """
def disable():
    """
    Disable the interface cache system.

    Disables caching and removes all cached FFTW objects, freeing
    associated memory. The background cache management thread is
    terminated.
    """
def is_enabled():
    """
    Check whether the cache is currently enabled.

    Returns:
        bool: True if cache is enabled, False otherwise
    """
def set_keepalive_time(keepalive_time):
    """
    Set the minimum time cached objects are kept alive.

    Parameters:
    - keepalive_time: float, minimum time in seconds to keep cached
      objects alive

    Notes:
    - Default keepalive time is 0.1 seconds
    - Objects are removed after being unused for this duration
    - Actual removal time may be longer due to thread scheduling
    - Using a cached object resets its timer
    """
# Example: cached objects are reused for arrays with matching properties
# (shape, dtype, strides) - not for identical array contents.
from pyfftw.interfaces import cache, numpy_fft
import numpy as np

# Enable caching
cache.enable()

# Create test data
data = np.random.randn(1024, 512) + 1j * np.random.randn(1024, 512)

# First transform - creates and caches FFTW object
result1 = numpy_fft.fft2(data)

# Equivalent transforms reuse cached object
result2 = numpy_fft.fft2(data)  # Fast - from cache
result3 = numpy_fft.fft2(data * 2)  # Still uses cache (same array properties)

# Different array properties require new FFTW object
different_data = np.random.randn(512, 512) + 1j * np.random.randn(512, 512)
result4 = numpy_fft.fft2(different_data)  # Creates new cached object

cache.disable()
# Example: controlling how long cached objects are kept alive.
from pyfftw.interfaces import cache, scipy_fft
import numpy as np
import time

# Enable the cache, then configure its keepalive time
cache.enable()
cache.set_keepalive_time(2.0)  # Keep objects alive for 2 seconds

data = np.random.randn(256)

# Use interface functions
fft_result = scipy_fft.fft(data)

# Wait and check if cache is still active
time.sleep(1.5)
if cache.is_enabled():
    # Object should still be in cache
    fft_result2 = scipy_fft.fft(data)  # Fast

time.sleep(1.0)  # Total 2.5 seconds - object should be removed
# This will create a new object
fft_result3 = scipy_fft.fft(data)  # Slower - new planning

cache.disable()
# Example: simple benchmark comparing uncached vs cached transforms.
from pyfftw.interfaces import cache, numpy_fft
import numpy as np
import time

data = np.random.randn(2048) + 1j * np.random.randn(2048)

# Without cache
# perf_counter() is the appropriate monotonic, high-resolution clock
# for measuring elapsed time (time.time() can jump and has coarser
# resolution on some platforms).
start_time = time.perf_counter()
for i in range(10):
    result = numpy_fft.fft(data)
no_cache_time = time.perf_counter() - start_time

# With cache
cache.enable()
start_time = time.perf_counter()
for i in range(10):
    result = numpy_fft.fft(data)
cache_time = time.perf_counter() - start_time
cache.disable()

print(f"Without cache: {no_cache_time:.3f}s")
print(f"With cache: {cache_time:.3f}s")
print(f"Speedup: {no_cache_time / cache_time:.1f}x")
# Example: context manager that scopes cache usage to a block.
from pyfftw.interfaces import cache, numpy_fft
import numpy as np
from contextlib import contextmanager


@contextmanager
def fftw_cache(keepalive_time=0.1):
    """Context manager for FFTW cache usage.

    Parameters:
    - keepalive_time: float, seconds to keep cached objects alive
      (default matches the cache module's 0.1 s default)

    The cache is enabled on entry and always disabled on exit, even
    if the body raises.
    """
    try:
        cache.enable()
        cache.set_keepalive_time(keepalive_time)
        yield
    finally:
        cache.disable()


# Use with context manager
with fftw_cache(keepalive_time=1.0):
    data = np.random.randn(1024)

    # All transforms in this block use caching
    fft1 = numpy_fft.fft(data)
    fft2 = numpy_fft.fft(data)  # Fast - from cache

    # Different sizes create separate cache entries
    data2 = np.random.randn(2048)
    fft3 = numpy_fft.fft(data2)  # New cache entry
    fft4 = numpy_fft.fft(data2)  # Fast - from cache
# Cache automatically disabled when leaving the context

The cache uses conservative equivalency checking. Objects are considered equivalent when:
disable() immediately frees all cached objects

Install with Tessl CLI
npx tessl i tessl/pypi-pyfftw