or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

audio-processing.md, audio-recording.md, core-waveform-control.md, event-system.md, index.md, plugin-system.md, regions-plugin.md, timeline-navigation.md, visual-customization.md

docs/audio-processing.md

0

# Audio Processing

1

2

Advanced audio processing capabilities including decoding, peaks generation, Web Audio integration, and audio data manipulation for high-performance waveform applications.

3

4

## Capabilities

5

6

### Audio Decoding and Data Access

7

8

Access decoded audio data and audio buffer information for advanced audio processing.

9

10

```typescript { .api }
interface WaveSurfer {
  /**
   * Get the decoded audio data as AudioBuffer
   * @returns AudioBuffer containing decoded audio data, or null if not loaded
   */
  getDecodedData(): AudioBuffer | null;
}

interface WaveSurferOptions {
  /** Decoding sample rate. Doesn't affect playback, defaults to 8000 */
  sampleRate?: number;

  /** Override the Blob MIME type for decoding */
  blobMimeType?: string;
}
```

27

28

**Usage Examples:**

29

30

```typescript

31

// Access decoded audio data

32

await wavesurfer.load("/audio.mp3");

33

const audioBuffer = wavesurfer.getDecodedData();

34

35

if (audioBuffer) {

36

console.log(`Channels: ${audioBuffer.numberOfChannels}`);

37

console.log(`Sample Rate: ${audioBuffer.sampleRate}`);

38

console.log(`Duration: ${audioBuffer.duration} seconds`);

39

console.log(`Length: ${audioBuffer.length} samples`);

40

41

// Access raw audio data

42

const leftChannel = audioBuffer.getChannelData(0);

43

const rightChannel = audioBuffer.numberOfChannels > 1 ?

44

audioBuffer.getChannelData(1) : null;

45

}

46

47

// Custom sample rate for decoding

48

const highResWaveform = WaveSurfer.create({

49

container: "#high-res",

50

sampleRate: 44100, // Higher sample rate for more detailed analysis

51

});

52

53

// Override MIME type for problematic files

54

const waveform = WaveSurfer.create({

55

container: "#custom-mime",

56

blobMimeType: "audio/wav", // Force WAV decoding

57

});

58

```

59

60

### Peaks Generation and Export

61

62

Generate and export waveform peaks data for caching and performance optimization.

63

64

```typescript { .api }
interface ExportPeaksOptions {
  /** Number of channels to export (default: 2) */
  channels?: number;

  /** Maximum length of peaks array (default: 8000) */
  maxLength?: number;

  /** Precision for peak values (default: 10000) */
  precision?: number;
}

interface WaveSurfer {
  /**
   * Export decoded peaks data for caching or analysis
   * @param options - Export configuration options
   * @returns Array of peak data arrays, one per channel
   */
  exportPeaks(options?: ExportPeaksOptions): Array<number[]>;
}
```

85

86

**Usage Examples:**

87

88

```typescript

89

// Basic peaks export

90

await wavesurfer.load("/audio.mp3");

91

const peaks = wavesurfer.exportPeaks();

92

console.log(`Exported ${peaks.length} channels of peaks data`);

93

94

// High-resolution peaks export

95

const highResPeaks = wavesurfer.exportPeaks({

96

channels: 2,

97

maxLength: 16000, // More data points

98

precision: 100000, // Higher precision

99

});

100

101

// Save peaks for later use

102

localStorage.setItem("audio-peaks", JSON.stringify(highResPeaks));

103

localStorage.setItem("audio-duration", wavesurfer.getDuration().toString());

104

105

// Load with cached peaks

106

const cachedPeaks = JSON.parse(localStorage.getItem("audio-peaks"));

107

const cachedDuration = parseFloat(localStorage.getItem("audio-duration"));

108

109

await wavesurfer.load("/audio.mp3", cachedPeaks, cachedDuration);

110

```

111

112

### Pre-computed Audio Data

113

114

Use pre-computed peaks and duration data to skip audio decoding for improved performance.

115

116

```typescript { .api }
interface WaveSurferOptions {
  /** Pre-computed audio data, arrays of floats for each channel */
  peaks?: Array<Float32Array | number[]>;

  /** Pre-computed audio duration in seconds */
  duration?: number;
}

interface WaveSurfer {
  /**
   * Load audio with pre-computed peaks and duration
   * @param url - Audio URL (can be empty string if using only peaks)
   * @param peaks - Pre-computed waveform data
   * @param duration - Pre-computed duration in seconds
   */
  load(url: string, peaks?: Array<Float32Array | number[]>, duration?: number): Promise<void>;
}
```

135

136

**Usage Examples:**

137

138

```typescript

139

// Load with pre-computed data (no decoding needed)

140

const precomputedPeaks = [

141

[0.1, 0.3, -0.2, 0.8, -0.4, 0.2], // Left channel

142

[0.05, 0.25, -0.15, 0.75, -0.35, 0.15] // Right channel

143

];

144

145

await wavesurfer.load("/audio.mp3", precomputedPeaks, 120.5);

146

147

// Peaks-only visualization (no actual audio)

148

await wavesurfer.load("", precomputedPeaks, 120.5);

149

150

// Initialize with peaks in options

151

const peaksOnlyWaveform = WaveSurfer.create({

152

container: "#peaks-only",

153

peaks: precomputedPeaks,

154

duration: 120.5,

155

// No URL needed - will render immediately

156

});

157

```

158

159

### Web Audio Backend

160

161

Use Web Audio API for advanced audio processing and effects integration.

162

163

```typescript { .api }
interface WaveSurferOptions {
  /** Playback "backend" to use, defaults to MediaElement */
  backend?: 'WebAudio' | 'MediaElement';
}
```

169

170

**Usage Examples:**

171

172

```typescript

173

// Use Web Audio backend for advanced processing

174

const webAudioWaveform = WaveSurfer.create({

175

container: "#webaudio",

176

backend: "WebAudio",

177

});

178

179

// Access Web Audio context for effects

180

await webAudioWaveform.load("/audio.mp3");

181

const mediaElement = webAudioWaveform.getMediaElement();

182

183

// Web Audio effects chain example

184

if (window.AudioContext) {

185

const audioContext = new AudioContext();

186

const source = audioContext.createMediaElementSource(mediaElement);

187

188

// Add reverb effect

189

const convolver = audioContext.createConvolver();

190

const gainNode = audioContext.createGain();

191

192

source.connect(convolver);

193

convolver.connect(gainNode);

194

gainNode.connect(audioContext.destination);

195

196

// Load impulse response for reverb

197

fetch("/reverb-impulse.wav")

198

.then(response => response.arrayBuffer())

199

.then(data => audioContext.decodeAudioData(data))

200

.then(buffer => {

201

convolver.buffer = buffer;

202

});

203

}

204

```

205

206

### Large File Handling

207

208

Optimize performance for large audio files using streaming and pre-processing techniques.

209

210

```typescript { .api }
interface WaveSurferOptions {
  /** Options to pass to the fetch method for loading audio */
  fetchParams?: RequestInit;
}
```

216

217

**Usage Examples:**

218

219

```typescript

220

// Streaming large files with progress

221

const largeFileWaveform = WaveSurfer.create({

222

container: "#large-file",

223

fetchParams: {

224

cache: "force-cache", // Cache large files

225

headers: {

226

"Range": "bytes=0-1048576", // Request first 1MB for preview

227

},

228

},

229

});

230

231

// Monitor loading progress

232

largeFileWaveform.on("loading", (percent) => {

233

console.log(`Loading: ${percent}%`);

234

document.getElementById("progress").style.width = `${percent}%`;

235

});

236

237

// Handle large file errors

238

largeFileWaveform.on("error", (error) => {

239

console.error("Large file loading failed:", error);

240

// Fallback to lower quality or pre-computed peaks

241

});

242

243

// Use pre-computed peaks for very large files

244

const hugePeaks = await fetch("/audio-huge.peaks.json").then(r => r.json());

245

const hugeDuration = await fetch("/audio-huge.duration.txt").then(r => r.text());

246

247

await largeFileWaveform.load("/audio-huge.mp3", hugePeaks, parseFloat(hugeDuration));

248

```

249

250

### Audio Format Support

251

252

Handle various audio formats and encoding configurations.

253

254

```typescript { .api }
interface WaveSurferOptions {
  /** Override the Blob MIME type for problematic files */
  blobMimeType?: string;

  /** Options to pass to the fetch method */
  fetchParams?: RequestInit;
}
```

263

264

**Usage Examples:**

265

266

```typescript

267

// Handle specific audio formats

268

const formatSpecificWaveform = WaveSurfer.create({

269

container: "#format-specific",

270

blobMimeType: "audio/wav", // Force WAV interpretation

271

});

272

273

// CORS-enabled audio loading

274

const corsWaveform = WaveSurfer.create({

275

container: "#cors-audio",

276

fetchParams: {

277

mode: "cors",

278

credentials: "include",

279

headers: {

280

"Authorization": "Bearer token123",

281

},

282

},

283

});

284

285

// Format detection and fallback

286

async function loadAudioWithFallback(wavesurfer, urls) {

287

for (const url of urls) {

288

try {

289

await wavesurfer.load(url);

290

console.log(`Successfully loaded: ${url}`);

291

break;

292

} catch (error) {

293

console.warn(`Failed to load ${url}:`, error);

294

continue;

295

}

296

}

297

}

298

299

// Try multiple formats

300

await loadAudioWithFallback(wavesurfer, [

301

"/audio.webm", // Try WebM first

302

"/audio.ogg", // Fallback to OGG

303

"/audio.mp3", // Final fallback to MP3

304

]);

305

```

306

307

### Audio Analysis and Processing

308

309

Perform audio analysis and processing on decoded audio data.

310

311

```typescript { .api }
interface WaveSurfer {
  /**
   * Get the decoded audio data for analysis
   * @returns AudioBuffer with raw audio data
   */
  getDecodedData(): AudioBuffer | null;
}
```

320

321

**Usage Examples:**

322

323

```typescript

324

// Audio analysis functions

325

function analyzeAudio(audioBuffer) {

326

const channelData = audioBuffer.getChannelData(0);

327

328

// Calculate RMS (Root Mean Square) for volume analysis

329

let sum = 0;

330

for (let i = 0; i < channelData.length; i++) {

331

sum += channelData[i] * channelData[i];

332

}

333

const rms = Math.sqrt(sum / channelData.length);

334

335

// Find peak amplitude

336

let peak = 0;

337

for (let i = 0; i < channelData.length; i++) {

338

const abs = Math.abs(channelData[i]);

339

if (abs > peak) peak = abs;

340

}

341

342

return { rms, peak };

343

}

344

345

// Detect silence periods

346

function detectSilence(audioBuffer, threshold = 0.01) {

347

const channelData = audioBuffer.getChannelData(0);

348

const sampleRate = audioBuffer.sampleRate;

349

const silentRegions = [];

350

351

let silentStart = null;

352

for (let i = 0; i < channelData.length; i++) {

353

const isQuiet = Math.abs(channelData[i]) < threshold;

354

355

if (isQuiet && silentStart === null) {

356

silentStart = i / sampleRate; // Convert to seconds

357

} else if (!isQuiet && silentStart !== null) {

358

silentRegions.push({

359

start: silentStart,

360

end: i / sampleRate,

361

});

362

silentStart = null;

363

}

364

}

365

366

return silentRegions;

367

}

368

369

// Use analysis

370

await wavesurfer.load("/audio.mp3");

371

const audioBuffer = wavesurfer.getDecodedData();

372

373

if (audioBuffer) {

374

const analysis = analyzeAudio(audioBuffer);

375

console.log(`RMS: ${analysis.rms}, Peak: ${analysis.peak}`);

376

377

const silentParts = detectSilence(audioBuffer);

378

console.log(`Found ${silentParts.length} silent regions`);

379

}

380

```