Interactive audio waveform rendering and playback library for web applications
—
Advanced audio processing capabilities including decoding, peaks generation, Web Audio integration, and audio data manipulation for high-performance waveform applications.
Access decoded audio data and audio buffer information for advanced audio processing.
interface WaveSurfer {
/**
* Get the decoded audio data as AudioBuffer
* @returns AudioBuffer containing decoded audio data, or null if not loaded
*/
getDecodedData(): AudioBuffer | null;
}
interface WaveSurferOptions {
/** Decoding sample rate. Doesn't affect playback, defaults to 8000 */
sampleRate?: number;
/** Override the Blob MIME type for decoding */
blobMimeType?: string;
}

Usage Examples:
// Access decoded audio data
await wavesurfer.load("/audio.mp3");
const audioBuffer = wavesurfer.getDecodedData();
if (audioBuffer) {
console.log(`Channels: ${audioBuffer.numberOfChannels}`);
console.log(`Sample Rate: ${audioBuffer.sampleRate}`);
console.log(`Duration: ${audioBuffer.duration} seconds`);
console.log(`Length: ${audioBuffer.length} samples`);
// Access raw audio data
const leftChannel = audioBuffer.getChannelData(0);
const rightChannel = audioBuffer.numberOfChannels > 1 ?
audioBuffer.getChannelData(1) : null;
}
// Custom sample rate for decoding
const highResWaveform = WaveSurfer.create({
container: "#high-res",
sampleRate: 44100, // Higher sample rate for more detailed analysis
});
// Override MIME type for problematic files
const waveform = WaveSurfer.create({
container: "#custom-mime",
blobMimeType: "audio/wav", // Force WAV decoding
});

Generate and export waveform peaks data for caching and performance optimization.
interface ExportPeaksOptions {
/** Number of channels to export (default: 2) */
channels?: number;
/** Maximum length of peaks array (default: 8000) */
maxLength?: number;
/** Precision for peak values (default: 10000) */
precision?: number;
}
interface WaveSurfer {
/**
* Export decoded peaks data for caching or analysis
* @param options - Export configuration options
* @returns Array of peak data arrays, one per channel
*/
exportPeaks(options?: ExportPeaksOptions): Array<number[]>;
}

Usage Examples:
// Basic peaks export
await wavesurfer.load("/audio.mp3");
const peaks = wavesurfer.exportPeaks();
console.log(`Exported ${peaks.length} channels of peaks data`);
// High-resolution peaks export
const highResPeaks = wavesurfer.exportPeaks({
channels: 2,
maxLength: 16000, // More data points
precision: 100000, // Higher precision
});
// Save peaks for later use
localStorage.setItem("audio-peaks", JSON.stringify(highResPeaks));
localStorage.setItem("audio-duration", wavesurfer.getDuration().toString());
// Load with cached peaks
const cachedPeaks = JSON.parse(localStorage.getItem("audio-peaks"));
const cachedDuration = parseFloat(localStorage.getItem("audio-duration"));
await wavesurfer.load("/audio.mp3", cachedPeaks, cachedDuration);

Use pre-computed peaks and duration data to skip audio decoding for improved performance.
interface WaveSurferOptions {
/** Pre-computed audio data, arrays of floats for each channel */
peaks?: Array<Float32Array | number[]>;
/** Pre-computed audio duration in seconds */
duration?: number;
}
interface WaveSurfer {
/**
* Load audio with pre-computed peaks and duration
* @param url - Audio URL (can be empty string if using only peaks)
* @param peaks - Pre-computed waveform data
* @param duration - Pre-computed duration in seconds
*/
load(url: string, peaks?: Array<Float32Array | number[]>, duration?: number): Promise<void>;
}

Usage Examples:
// Load with pre-computed data (no decoding needed)
const precomputedPeaks = [
[0.1, 0.3, -0.2, 0.8, -0.4, 0.2], // Left channel
[0.05, 0.25, -0.15, 0.75, -0.35, 0.15] // Right channel
];
await wavesurfer.load("/audio.mp3", precomputedPeaks, 120.5);
// Peaks-only visualization (no actual audio)
await wavesurfer.load("", precomputedPeaks, 120.5);
// Initialize with peaks in options
const peaksOnlyWaveform = WaveSurfer.create({
container: "#peaks-only",
peaks: precomputedPeaks,
duration: 120.5,
// No URL needed - will render immediately
});

Use Web Audio API for advanced audio processing and effects integration.
interface WaveSurferOptions {
/** Playback "backend" to use, defaults to MediaElement */
backend?: 'WebAudio' | 'MediaElement';
}

Usage Examples:
// Use Web Audio backend for advanced processing
const webAudioWaveform = WaveSurfer.create({
container: "#webaudio",
backend: "WebAudio",
});
// Access Web Audio context for effects
await webAudioWaveform.load("/audio.mp3");
const mediaElement = webAudioWaveform.getMediaElement();
// Web Audio effects chain example
if (window.AudioContext) {
const audioContext = new AudioContext();
const source = audioContext.createMediaElementSource(mediaElement);
// Add reverb effect
const convolver = audioContext.createConvolver();
const gainNode = audioContext.createGain();
source.connect(convolver);
convolver.connect(gainNode);
gainNode.connect(audioContext.destination);
// Load impulse response for reverb
fetch("/reverb-impulse.wav")
.then(response => response.arrayBuffer())
.then(data => audioContext.decodeAudioData(data))
.then(buffer => {
convolver.buffer = buffer;
});
}

Optimize performance for large audio files using streaming and pre-processing techniques.
interface WaveSurferOptions {
/** Options to pass to the fetch method for loading audio */
fetchParams?: RequestInit;
}

Usage Examples:
// Streaming large files with progress
const largeFileWaveform = WaveSurfer.create({
container: "#large-file",
fetchParams: {
cache: "force-cache", // Cache large files
headers: {
"Range": "bytes=0-1048576", // Request first 1MB for preview
},
},
});
// Monitor loading progress
largeFileWaveform.on("loading", (percent) => {
console.log(`Loading: ${percent}%`);
document.getElementById("progress").style.width = `${percent}%`;
});
// Handle large file errors
largeFileWaveform.on("error", (error) => {
console.error("Large file loading failed:", error);
// Fallback to lower quality or pre-computed peaks
});
// Use pre-computed peaks for very large files
const hugePeaks = await fetch("/audio-huge.peaks.json").then(r => r.json());
const hugeDuration = await fetch("/audio-huge.duration.txt").then(r => r.text());
await largeFileWaveform.load("/audio-huge.mp3", hugePeaks, parseFloat(hugeDuration));

Handle various audio formats and encoding configurations.
interface WaveSurferOptions {
/** Override the Blob MIME type for problematic files */
blobMimeType?: string;
/** Options to pass to the fetch method */
fetchParams?: RequestInit;
}

Usage Examples:
// Handle specific audio formats
const formatSpecificWaveform = WaveSurfer.create({
container: "#format-specific",
blobMimeType: "audio/wav", // Force WAV interpretation
});
// CORS-enabled audio loading
const corsWaveform = WaveSurfer.create({
container: "#cors-audio",
fetchParams: {
mode: "cors",
credentials: "include",
headers: {
"Authorization": "Bearer token123",
},
},
});
// Format detection and fallback
// Try each URL in order until one loads successfully.
// @param wavesurfer - a WaveSurfer instance (anything with an async load(url) method)
// @param urls - candidate audio URLs, ordered from most to least preferred
// @returns the URL that loaded successfully
// @throws Error when every URL fails — the original version resolved silently
//         in that case, leaving the caller unaware that nothing was loaded.
async function loadAudioWithFallback(wavesurfer, urls) {
  const failed = [];
  for (const url of urls) {
    try {
      await wavesurfer.load(url);
      console.log(`Successfully loaded: ${url}`);
      return url; // report which format won
    } catch (error) {
      console.warn(`Failed to load ${url}:`, error);
      failed.push(url);
    }
  }
  // No format could be decoded — surface the failure instead of swallowing it.
  throw new Error(`All audio sources failed to load: ${failed.join(", ")}`);
}
// Try multiple formats
await loadAudioWithFallback(wavesurfer, [
"/audio.webm", // Try WebM first
"/audio.ogg", // Fallback to OGG
"/audio.mp3", // Final fallback to MP3
]);

Perform audio analysis and processing on decoded audio data.
interface WaveSurfer {
/**
* Get the decoded audio data for analysis
* @returns AudioBuffer with raw audio data
*/
getDecodedData(): AudioBuffer | null;
}

Usage Examples:
// Audio analysis functions
// Compute basic loudness statistics for channel 0 of an AudioBuffer-like object.
// @param audioBuffer - anything exposing getChannelData(0) -> Float32Array
// @returns { rms, peak } — RMS (average power) and peak absolute amplitude.
// A zero-length channel yields { rms: 0, peak: 0 } instead of NaN
// (the original divided by zero). Both statistics are now gathered in a
// single pass instead of two loops over the same data.
function analyzeAudio(audioBuffer) {
  const channelData = audioBuffer.getChannelData(0);
  let sumOfSquares = 0;
  let peak = 0;
  for (let i = 0; i < channelData.length; i++) {
    const sample = channelData[i];
    sumOfSquares += sample * sample;
    const abs = Math.abs(sample);
    if (abs > peak) peak = abs;
  }
  // Guard the empty-buffer case: sqrt(0 / 0) would be NaN.
  const rms = channelData.length > 0 ? Math.sqrt(sumOfSquares / channelData.length) : 0;
  return { rms, peak };
}
// Detect silence periods
// Find contiguous regions of channel 0 quieter than `threshold` (absolute amplitude).
// @param audioBuffer - anything exposing getChannelData(0) and sampleRate
// @param threshold - amplitude below which a sample counts as silent (default 0.01)
// @returns array of { start, end } region objects in seconds.
// Fixes a bug in the original: a silent region extending to the end of the
// buffer was never closed and was silently dropped from the result.
function detectSilence(audioBuffer, threshold = 0.01) {
  const channelData = audioBuffer.getChannelData(0);
  const sampleRate = audioBuffer.sampleRate;
  const silentRegions = [];
  let silentStart = null; // open region start time, or null when not in silence
  for (let i = 0; i < channelData.length; i++) {
    const isQuiet = Math.abs(channelData[i]) < threshold;
    if (isQuiet && silentStart === null) {
      silentStart = i / sampleRate; // region opens here (seconds)
    } else if (!isQuiet && silentStart !== null) {
      silentRegions.push({
        start: silentStart,
        end: i / sampleRate,
      });
      silentStart = null;
    }
  }
  // Close a region that runs to the end of the audio (previously lost).
  if (silentStart !== null) {
    silentRegions.push({
      start: silentStart,
      end: channelData.length / sampleRate,
    });
  }
  return silentRegions;
}
// Use analysis
await wavesurfer.load("/audio.mp3");
const audioBuffer = wavesurfer.getDecodedData();
if (audioBuffer) {
const analysis = analyzeAudio(audioBuffer);
console.log(`RMS: ${analysis.rms}, Peak: ${analysis.peak}`);
const silentParts = detectSilence(audioBuffer);
console.log(`Found ${silentParts.length} silent regions`);
}

Install with Tessl CLI
npx tessl i tessl/npm-wavesurfer-js