or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs/

- configuration-utilities.md
- core-fftw.md
- fft-builders.md
- index.md
- interfaces-cache.md
- memory-management.md
- numpy-fft-interface.md
- scipy-fft-interface.md
- scipy-fftpack-interface.md
- wisdom-management.md

docs/wisdom-management.md

# Wisdom Management

Functions for importing, exporting, and managing FFTW wisdom to optimize transform planning across sessions and applications. FFTW wisdom contains information about optimal algorithms and is crucial for maximizing performance in production environments.

## Capabilities

### Wisdom Export and Import

Functions to save and load FFTW optimization data.

```python { .api }
def export_wisdom():
    """
    Export FFTW wisdom to bytes.

    Returns FFTW wisdom as bytes that can be saved to file or database
    for later use. Wisdom contains information about optimal transform
    algorithms discovered through planning.

    Returns:
    - bytes: Binary wisdom data
    """

def import_wisdom(wisdom):
    """
    Import FFTW wisdom from bytes.

    Load previously exported wisdom to accelerate transform planning.
    This should be called before creating FFTW objects to benefit from
    previously discovered optimal algorithms.

    Parameters:
    - wisdom: bytes - Binary wisdom data from export_wisdom()

    Returns:
    - bool: True if wisdom was successfully imported
    """

def forget_wisdom():
    """
    Forget all accumulated FFTW wisdom.

    Clears all stored optimization information, forcing FFTW to
    rediscover optimal algorithms. Useful for testing or when
    system configuration changes significantly.
    """
```

47

48

## Usage Examples

### Basic Wisdom Management

```python
import pyfftw
import numpy as np

# Create and plan some transforms to accumulate wisdom
N = 1024
a = pyfftw.empty_aligned(N, dtype='complex128')
b = pyfftw.empty_aligned(N, dtype='complex128')

# Plan several different sizes to build up wisdom
for size in [512, 1024, 2048, 4096]:
    if size <= N:
        temp_a = a[:size]
        temp_b = b[:size]
        fft_obj = pyfftw.FFTW(temp_a, temp_b, flags=('FFTW_MEASURE',))

# Export accumulated wisdom
wisdom_data = pyfftw.export_wisdom()
print(f"Wisdom size: {len(wisdom_data)} bytes")

# Clear wisdom
pyfftw.forget_wisdom()

# Import wisdom back
success = pyfftw.import_wisdom(wisdom_data)
print(f"Wisdom import successful: {success}")
```

### Persistent Wisdom Storage

81

82

```python
import pyfftw
import numpy as np
import pickle
import os

wisdom_file = 'fftw_wisdom.pkl'

# Load existing wisdom if available
if os.path.exists(wisdom_file):
    with open(wisdom_file, 'rb') as f:
        wisdom_data = pickle.load(f)
    pyfftw.import_wisdom(wisdom_data)
    print("Loaded existing wisdom")

# Perform FFT operations that will generate wisdom
data = np.random.randn(2048) + 1j * np.random.randn(2048)
fft_obj = pyfftw.FFTW(
    pyfftw.empty_aligned(2048, dtype='complex128'),
    pyfftw.empty_aligned(2048, dtype='complex128'),
    flags=('FFTW_PATIENT',)  # More thorough planning
)

# Save updated wisdom
wisdom_data = pyfftw.export_wisdom()
with open(wisdom_file, 'wb') as f:
    pickle.dump(wisdom_data, f)
print("Saved wisdom to file")
```

### Application Initialization

113

114

```python
import pyfftw
import numpy as np
import json
import base64
import os
import time

class FFTWManager:
    """Manager class for FFTW wisdom in applications."""

    def __init__(self, wisdom_file=None):
        self.wisdom_file = wisdom_file
        self.load_wisdom()

    def load_wisdom(self):
        """Load wisdom from file if available."""
        if self.wisdom_file and os.path.exists(self.wisdom_file):
            try:
                with open(self.wisdom_file, 'r') as f:
                    data = json.load(f)
                wisdom_bytes = base64.b64decode(data['wisdom'].encode())
                if pyfftw.import_wisdom(wisdom_bytes):
                    print(f"Loaded wisdom from {self.wisdom_file}")
                else:
                    print("Failed to import wisdom")
            except Exception as e:
                print(f"Error loading wisdom: {e}")

    def save_wisdom(self):
        """Save current wisdom to file."""
        if self.wisdom_file:
            try:
                wisdom_bytes = pyfftw.export_wisdom()
                wisdom_b64 = base64.b64encode(wisdom_bytes).decode()
                data = {
                    'wisdom': wisdom_b64,
                    'timestamp': time.time()
                }
                with open(self.wisdom_file, 'w') as f:
                    json.dump(data, f)
                print(f"Saved wisdom to {self.wisdom_file}")
            except Exception as e:
                print(f"Error saving wisdom: {e}")

    def create_fft_object(self, input_array, output_array, **kwargs):
        """Create FFTW object and save wisdom afterwards."""
        fft_obj = pyfftw.FFTW(input_array, output_array, **kwargs)
        self.save_wisdom()  # Save any new wisdom
        return fft_obj

# Usage
manager = FFTWManager('app_wisdom.json')

# Create FFT objects - wisdom is automatically managed
data = pyfftw.empty_aligned(1024, dtype='complex128')
result = pyfftw.empty_aligned(1024, dtype='complex128')

fft_obj = manager.create_fft_object(
    data, result,
    flags=('FFTW_MEASURE',)
)
```

### Benchmarking with Wisdom

180

181

```python
import pyfftw
import numpy as np
import time

def benchmark_planning_modes():
    """Compare planning times with and without wisdom."""

    sizes = [512, 1024, 2048, 4096]

    # First run without wisdom
    pyfftw.forget_wisdom()
    times_no_wisdom = []

    for size in sizes:
        a = pyfftw.empty_aligned(size, dtype='complex128')
        b = pyfftw.empty_aligned(size, dtype='complex128')

        start = time.time()
        fft_obj = pyfftw.FFTW(a, b, flags=('FFTW_MEASURE',))
        plan_time = time.time() - start
        times_no_wisdom.append(plan_time)

        print(f"Size {size}: {plan_time:.3f}s (no wisdom)")

    # Export wisdom after planning
    wisdom = pyfftw.export_wisdom()

    # Clear and reimport wisdom
    pyfftw.forget_wisdom()
    pyfftw.import_wisdom(wisdom)

    # Second run with wisdom
    times_with_wisdom = []

    for size in sizes:
        a = pyfftw.empty_aligned(size, dtype='complex128')
        b = pyfftw.empty_aligned(size, dtype='complex128')

        start = time.time()
        fft_obj = pyfftw.FFTW(a, b, flags=('FFTW_MEASURE',))
        plan_time = time.time() - start
        times_with_wisdom.append(plan_time)

        print(f"Size {size}: {plan_time:.3f}s (with wisdom)")

    # Show improvements
    print("\nImprovement with wisdom:")
    for i, size in enumerate(sizes):
        speedup = times_no_wisdom[i] / times_with_wisdom[i]
        print(f"Size {size}: {speedup:.2f}x faster planning")

# Run benchmark
benchmark_planning_modes()
```

### Multi-threaded Wisdom Management

238

239

```python
import pyfftw
import numpy as np
import threading
import time

class ThreadSafeWisdomManager:
    """Thread-safe wisdom manager for multi-threaded applications."""

    def __init__(self):
        self._lock = threading.Lock()
        self._wisdom_cache = None

    def get_wisdom(self):
        """Get current wisdom in thread-safe manner."""
        with self._lock:
            return pyfftw.export_wisdom()

    def set_wisdom(self, wisdom_data):
        """Set wisdom in thread-safe manner."""
        with self._lock:
            return pyfftw.import_wisdom(wisdom_data) if wisdom_data else False

    def create_optimized_fft(self, shape, dtype='complex128', axes=None):
        """Create FFT object with shared wisdom."""
        # Check if we have cached wisdom
        if self._wisdom_cache is None:
            # Build wisdom for common sizes
            self._build_wisdom_cache(shape, dtype, axes)

        # Create arrays and FFTW object
        input_array = pyfftw.empty_aligned(shape, dtype=dtype)
        output_array = pyfftw.empty_aligned(shape, dtype=dtype)

        return pyfftw.FFTW(
            input_array,
            output_array,
            axes=axes,
            flags=('FFTW_MEASURE',),
            threads=1  # Each thread uses single-threaded FFTW
        )

    def _build_wisdom_cache(self, shape, dtype, axes):
        """Build wisdom cache for common operations."""
        with self._lock:
            if self._wisdom_cache is not None:
                return

            print("Building wisdom cache...")
            # Plan transforms for this and related sizes
            for scale in [0.5, 1.0, 2.0]:
                try:
                    test_shape = tuple(int(s * scale) for s in shape)
                    test_input = pyfftw.empty_aligned(test_shape, dtype=dtype)
                    test_output = pyfftw.empty_aligned(test_shape, dtype=dtype)

                    pyfftw.FFTW(
                        test_input, test_output,
                        axes=axes,
                        flags=('FFTW_MEASURE',)
                    )
                except:
                    pass  # Skip if size is problematic

            self._wisdom_cache = pyfftw.export_wisdom()
            print("Wisdom cache built")

def worker_function(manager, worker_id, results):
    """Worker function that uses shared wisdom."""
    shape = (1024, 512)

    # Create FFT object (will use shared wisdom)
    fft_obj = manager.create_optimized_fft(shape)

    # Perform some transforms
    start = time.time()
    for i in range(10):
        data = np.random.randn(*shape) + 1j * np.random.randn(*shape)
        fft_obj.input_array[:] = data
        result = fft_obj()

    elapsed = time.time() - start
    results[worker_id] = elapsed
    print(f"Worker {worker_id}: {elapsed:.3f}s")

# Multi-threaded usage
manager = ThreadSafeWisdomManager()
results = {}
threads = []

# Start multiple worker threads
for i in range(4):
    thread = threading.Thread(
        target=worker_function,
        args=(manager, i, results)
    )
    threads.append(thread)
    thread.start()

# Wait for completion
for thread in threads:
    thread.join()

print(f"Total time across all workers: {sum(results.values()):.3f}s")
```