A Pythonic wrapper around FFTW, the FFT library, presenting a unified interface for all the supported transforms.

Wisdom management: functions for importing, exporting, and managing FFTW wisdom to optimize transform planning across sessions and applications. FFTW wisdom records which transform algorithms were found to be optimal during planning, and reusing it is crucial for maximizing performance in production environments.
The functions below save, load, and clear this FFTW optimization data.
def export_wisdom():
    """
    Export FFTW wisdom to bytes.

    Returns FFTW wisdom as bytes that can be saved to file or database
    for later use. Wisdom contains information about optimal transform
    algorithms discovered through planning.

    Returns:
    - bytes: Binary wisdom data

    NOTE(review): the actual pyfftw.export_wisdom() returns a tuple of
    byte strings (one per floating-point precision), not a single bytes
    object -- confirm against the pyfftw documentation.
    """
def import_wisdom(wisdom):
    """
    Import FFTW wisdom from bytes.

    Load previously exported wisdom to accelerate transform planning.
    This should be called before creating FFTW objects to benefit from
    previously discovered optimal algorithms.

    Parameters:
    - wisdom: bytes - Binary wisdom data from export_wisdom()

    Returns:
    - bool: True if wisdom was successfully imported

    NOTE(review): the actual pyfftw.import_wisdom() takes the tuple
    produced by export_wisdom() and returns a tuple of booleans, one per
    precision -- confirm against the pyfftw documentation.
    """
def forget_wisdom():
    """
    Forget all accumulated FFTW wisdom.

    Clears all stored optimization information, forcing FFTW to
    rediscover optimal algorithms. Useful for testing or when
    system configuration changes significantly.
    """


import pyfftw
import numpy as np

# Example 1: accumulate wisdom by planning, then export/clear/re-import it.

# Create and plan some transforms to accumulate wisdom
N = 1024
a = pyfftw.empty_aligned(N, dtype='complex128')
b = pyfftw.empty_aligned(N, dtype='complex128')

# Plan several different sizes to build up wisdom
for size in [512, 1024, 2048, 4096]:
    if size <= N:  # only sizes that fit in the pre-allocated buffers
        temp_a = a[:size]
        temp_b = b[:size]
        fft_obj = pyfftw.FFTW(temp_a, temp_b, flags=('FFTW_MEASURE',))

# Export accumulated wisdom
# NOTE(review): pyfftw.export_wisdom() returns a tuple of byte strings,
# so len() here counts tuple elements, not bytes -- verify intent.
wisdom_data = pyfftw.export_wisdom()
print(f"Wisdom size: {len(wisdom_data)} bytes")

# Clear wisdom
pyfftw.forget_wisdom()

# Import wisdom back
success = pyfftw.import_wisdom(wisdom_data)
print(f"Wisdom import successful: {success}")

import pyfftw
import numpy as np
import pickle
import os

# Example 2: persist wisdom across sessions with pickle.

wisdom_file = 'fftw_wisdom.pkl'

# Load existing wisdom if available
if os.path.exists(wisdom_file):
    with open(wisdom_file, 'rb') as f:
        # SECURITY NOTE: pickle.load executes arbitrary code when
        # unpickling -- only load wisdom files you created yourself.
        wisdom_data = pickle.load(f)
    pyfftw.import_wisdom(wisdom_data)
    print("Loaded existing wisdom")

# Perform FFT operations that will generate wisdom
data = np.random.randn(2048) + 1j * np.random.randn(2048)
fft_obj = pyfftw.FFTW(
    pyfftw.empty_aligned(2048, dtype='complex128'),
    pyfftw.empty_aligned(2048, dtype='complex128'),
    flags=('FFTW_PATIENT',)  # More thorough planning
)

# Save updated wisdom
wisdom_data = pyfftw.export_wisdom()
with open(wisdom_file, 'wb') as f:
    pickle.dump(wisdom_data, f)
print("Saved wisdom to file")

import pyfftw
import numpy as np
import json
import base64
class FFTWManager:
    """Manager class for FFTW wisdom in applications.

    Loads wisdom from a base64-encoded JSON file at construction (if one
    was given and exists) and re-saves wisdom every time a new FFTW
    object is planned via create_fft_object().
    """

    def __init__(self, wisdom_file=None):
        # Path to the JSON wisdom file, or None to disable persistence.
        self.wisdom_file = wisdom_file
        self.load_wisdom()

    def load_wisdom(self):
        """Load wisdom from file if available; no-op when no file is set."""
        if self.wisdom_file and os.path.exists(self.wisdom_file):
            try:
                with open(self.wisdom_file, 'r') as f:
                    data = json.load(f)
                wisdom_bytes = base64.b64decode(data['wisdom'].encode())
                if pyfftw.import_wisdom(wisdom_bytes):
                    print(f"Loaded wisdom from {self.wisdom_file}")
                else:
                    print("Failed to import wisdom")
            except Exception as e:
                # Best-effort: a corrupt/missing wisdom file should not
                # prevent the application from running.
                print(f"Error loading wisdom: {e}")

    def save_wisdom(self):
        """Save current wisdom to file; no-op when no file is set."""
        if self.wisdom_file:
            try:
                # NOTE(review): pyfftw.export_wisdom() returns a tuple of
                # byte strings; b64encode expects a single bytes-like
                # object, so this likely needs per-element encoding --
                # confirm against the pyfftw documentation.
                wisdom_bytes = pyfftw.export_wisdom()
                wisdom_b64 = base64.b64encode(wisdom_bytes).decode()
                data = {
                    'wisdom': wisdom_b64,
                    'timestamp': time.time()
                }
                with open(self.wisdom_file, 'w') as f:
                    json.dump(data, f)
                print(f"Saved wisdom to {self.wisdom_file}")
            except Exception as e:
                print(f"Error saving wisdom: {e}")

    def create_fft_object(self, input_array, output_array, **kwargs):
        """Create FFTW object and save wisdom afterwards."""
        fft_obj = pyfftw.FFTW(input_array, output_array, **kwargs)
        self.save_wisdom()  # Save any new wisdom
        return fft_obj
# Usage
# NOTE(review): these imports should live at the top of the file; they
# work here only because they execute before FFTWManager is instantiated.
import time
import os

manager = FFTWManager('app_wisdom.json')

# Create FFT objects - wisdom is automatically managed
data = pyfftw.empty_aligned(1024, dtype='complex128')
result = pyfftw.empty_aligned(1024, dtype='complex128')
fft_obj = manager.create_fft_object(
    data, result,
    flags=('FFTW_MEASURE',)
)

import pyfftw
import numpy as np
import time
def benchmark_planning_modes():
    """Compare FFTW planning times with and without wisdom.

    Plans the same set of sizes twice -- first after clearing all wisdom,
    then after re-importing the wisdom accumulated by the first pass --
    and prints the per-size planning-time speedup.
    """
    sizes = [512, 1024, 2048, 4096]

    # First run without wisdom
    pyfftw.forget_wisdom()
    times_no_wisdom = []
    for size in sizes:
        a = pyfftw.empty_aligned(size, dtype='complex128')
        b = pyfftw.empty_aligned(size, dtype='complex128')
        # perf_counter() is monotonic and high-resolution, unlike
        # time.time(), so it is the right clock for short benchmarks.
        start = time.perf_counter()
        fft_obj = pyfftw.FFTW(a, b, flags=('FFTW_MEASURE',))
        plan_time = time.perf_counter() - start
        times_no_wisdom.append(plan_time)
        print(f"Size {size}: {plan_time:.3f}s (no wisdom)")

    # Export wisdom after planning
    wisdom = pyfftw.export_wisdom()

    # Clear and reimport wisdom
    pyfftw.forget_wisdom()
    pyfftw.import_wisdom(wisdom)

    # Second run with wisdom
    times_with_wisdom = []
    for size in sizes:
        a = pyfftw.empty_aligned(size, dtype='complex128')
        b = pyfftw.empty_aligned(size, dtype='complex128')
        start = time.perf_counter()
        fft_obj = pyfftw.FFTW(a, b, flags=('FFTW_MEASURE',))
        plan_time = time.perf_counter() - start
        times_with_wisdom.append(plan_time)
        print(f"Size {size}: {plan_time:.3f}s (with wisdom)")

    # Show improvements
    print("\nImprovement with wisdom:")
    for i, size in enumerate(sizes):
        # Guard against a zero denominator when planning with wisdom is
        # faster than the timer can resolve.
        speedup = times_no_wisdom[i] / max(times_with_wisdom[i], 1e-12)
        print(f"Size {size}: {speedup:.2f}x faster planning")
# Run benchmark
benchmark_planning_modes()

import pyfftw
import numpy as np
import threading
import time
class ThreadSafeWisdomManager:
    """Thread-safe wisdom manager for multi-threaded applications.

    Serializes wisdom import/export through a single lock and lazily
    builds a shared wisdom cache the first time an FFT object is
    requested.
    """

    def __init__(self):
        self._lock = threading.Lock()
        # Exported wisdom blob; populated once by _build_wisdom_cache().
        self._wisdom_cache = None

    def get_wisdom(self):
        """Get current wisdom in a thread-safe manner."""
        with self._lock:
            return pyfftw.export_wisdom()

    def set_wisdom(self, wisdom_data):
        """Set wisdom in a thread-safe manner.

        Returns False for empty/None wisdom_data, otherwise the result
        of pyfftw.import_wisdom().
        """
        with self._lock:
            return pyfftw.import_wisdom(wisdom_data) if wisdom_data else False

    def create_optimized_fft(self, shape, dtype='complex128', axes=None):
        """Create an FFT object, building the shared wisdom cache first."""
        # Unsynchronized fast-path check; _build_wisdom_cache re-checks
        # under the lock, so at worst two threads race to build the cache.
        if self._wisdom_cache is None:
            self._build_wisdom_cache(shape, dtype, axes)

        # Create arrays and FFTW object
        input_array = pyfftw.empty_aligned(shape, dtype=dtype)
        output_array = pyfftw.empty_aligned(shape, dtype=dtype)
        return pyfftw.FFTW(
            input_array,
            output_array,
            axes=axes,
            flags=('FFTW_MEASURE',),
            threads=1  # Each thread uses single-threaded FFTW
        )

    def _build_wisdom_cache(self, shape, dtype, axes):
        """Build the wisdom cache by planning this and related sizes."""
        with self._lock:
            if self._wisdom_cache is not None:
                return  # another thread built the cache first
            print("Building wisdom cache...")
            # Plan transforms for this and related sizes
            for scale in [0.5, 1.0, 2.0]:
                try:
                    test_shape = tuple(int(s * scale) for s in shape)
                    test_input = pyfftw.empty_aligned(test_shape, dtype=dtype)
                    test_output = pyfftw.empty_aligned(test_shape, dtype=dtype)
                    pyfftw.FFTW(
                        test_input, test_output,
                        axes=axes,
                        flags=('FFTW_MEASURE',)
                    )
                except Exception:
                    # Narrowed from a bare `except:` so SystemExit and
                    # KeyboardInterrupt are not swallowed.
                    pass  # Skip if size is problematic
            self._wisdom_cache = pyfftw.export_wisdom()
            print("Wisdom cache built")
def worker_function(manager, worker_id, results):
    """Worker that uses shared wisdom via the manager.

    Creates an FFT object through `manager`, runs ten transforms on
    random data, and records the elapsed time in results[worker_id].
    """
    shape = (1024, 512)

    # Create FFT object (will use shared wisdom)
    fft_obj = manager.create_optimized_fft(shape)

    # Perform some transforms
    start = time.time()
    for i in range(10):
        data = np.random.randn(*shape) + 1j * np.random.randn(*shape)
        fft_obj.input_array[:] = data
        result = fft_obj()
    elapsed = time.time() - start

    results[worker_id] = elapsed
    print(f"Worker {worker_id}: {elapsed:.3f}s")
# Multi-threaded usage
manager = ThreadSafeWisdomManager()
results = {}
threads = []

# Start multiple worker threads
for i in range(4):
    thread = threading.Thread(
        target=worker_function,
        args=(manager, i, results)
    )
    threads.append(thread)
    thread.start()

# Wait for completion
for thread in threads:
    thread.join()

print(f"Total time across all workers: {sum(results.values()):.3f}s")
# Install with the Tessl CLI: npx tessl i tessl/pypi-pyfftw