The ElevenLabs SDK includes helpful utility functions for audio playback, error handling, and development workflows. These utilities simplify common tasks like playing generated audio, handling API errors gracefully, and managing audio streams.
import {
play, // Audio playback using ffplay
stream, // Audio streaming using mpv
ElevenLabsError, // Base API error class
ElevenLabsTimeoutError, // Timeout-specific error class
ElevenLabsEnvironment // Environment configuration
} from 'elevenlabs';

The play() function provides simple audio playback using ffplay (part of the FFmpeg suite).
import { ElevenLabsClient, play } from 'elevenlabs';
const client = new ElevenLabsClient();
// Generate audio and play it immediately
const audio = await client.textToSpeech.convert(
"21m00Tcm4TlvDq8ikWAM",
{
text: "Hello world! This audio will be played using ffplay.",
model_id: "eleven_multilingual_v2"
}
);
// Play the audio (requires ffplay to be installed)
await play(audio);
console.log('Audio playback completed');

// The play() function requires ffplay (part of FFmpeg)
// Installation instructions:
// macOS (using Homebrew)
// brew install ffmpeg
// Ubuntu/Debian
// sudo apt update && sudo apt install ffmpeg
// Windows
// Download from https://ffmpeg.org/download.html
// Verify installation
import commandExists from 'command-exists';
if (!commandExists.sync('ffplay')) {
console.error('ffplay not found. Please install FFmpeg.');
console.error('macOS: brew install ffmpeg');
console.error('Linux: sudo apt install ffmpeg');
console.error('Windows: https://ffmpeg.org/download.html');
}

import * as fs from 'fs';
// Play audio with error handling
// Play audio with error handling.
// Accepts the Readable returned by textToSpeech.convert() and reports
// failures without rethrowing (best-effort playback helper).
// Note: the parameter is typed via Node's 'stream' module — the SDK's
// `stream` export is the mpv playback function, not a namespace, so
// `stream.Readable` was not a valid type reference.
async function playAudioSafely(audioStream: import('stream').Readable) {
  try {
    await play(audioStream);
    console.log('✅ Audio played successfully');
  } catch (error) {
    if (error instanceof ElevenLabsError) {
      // play() surfaces a missing ffplay binary through the error message.
      if (error.message.includes('ffplay')) {
        console.error('FFplay not installed. Please install FFmpeg first.');
        console.error('Installation: https://ffmpeg.org/download.html');
      } else {
        console.error('Playback error:', error.message);
      }
    } else {
      console.error('Unexpected playback error:', error);
    }
  }
}
// Usage
const audio = await client.textToSpeech.convert(voiceId, { text: "Test audio" });
await playAudioSafely(audio);

The stream() function provides audio streaming using mpv for real-time playback.
import { stream } from 'elevenlabs';
// Stream audio using mpv (Node.js only)
const audioStream = await client.textToSpeech.convertAsStream(
"pNInz6obpgDQGcFmaJgB",
{
text: "This audio will be streamed in real-time using mpv.",
model_id: "eleven_turbo_v2_5",
optimize_streaming_latency: 3
}
);
// Stream the audio (requires mpv to be installed)
await stream(audioStream);
console.log('Audio streaming completed');

// The stream() function requires mpv and only works in Node.js
// Installation instructions:
// macOS (using Homebrew)
// brew install mpv
// Ubuntu/Debian
// sudo apt update && sudo apt install mpv
// Windows
// Download from https://mpv.io/installation/
// Verify installation and runtime
import { RUNTIME } from 'elevenlabs/core';
if (RUNTIME.type !== 'node') {
console.error('stream() function only works in Node.js environments');
} else if (!commandExists.sync('mpv')) {
console.error('mpv not found. Please install mpv.');
console.error('macOS: brew install mpv');
console.error('Linux: sudo apt install mpv');
console.error('Windows: https://mpv.io/installation/');
}

// Stream with comprehensive error handling
// Stream audio with comprehensive error handling.
// stream() is Node-only and requires the mpv binary on PATH.
// Note: the original typed the parameter as `stream.Readable`, but `stream`
// here is the SDK playback function being called below — the type must come
// from Node's 'stream' module instead.
async function streamAudioSafely(audioStream: import('stream').Readable) {
  try {
    await stream(audioStream);
    console.log('✅ Audio streamed successfully');
  } catch (error) {
    if (error instanceof ElevenLabsError) {
      // The SDK reports browser usage and a missing mpv binary via the message.
      if (error.message.includes('node environments')) {
        console.error('stream() only works in Node.js, not in browsers');
      } else if (error.message.includes('mpv')) {
        console.error('mpv not installed. Please install mpv first.');
        console.error('Installation: https://mpv.io/installation/');
      } else {
        console.error('Streaming error:', error.message);
      }
    } else {
      console.error('Unexpected streaming error:', error);
    }
  }
}
// Real-time streaming example
const streamingAudio = await client.textToSpeech.convertAsStream(
voiceId,
{
text: "Real-time streaming with mpv",
optimize_streaming_latency: 4
}
);
await streamAudioSafely(streamingAudio);

The ElevenLabsError class provides structured error information for API issues.
interface ElevenLabsError extends Error {
/** HTTP status code (if applicable) */
readonly statusCode?: number;
/** Response body containing error details */
readonly body?: unknown;
/** Error message */
message: string;
}

import { ElevenLabsError, ElevenLabsTimeoutError } from 'elevenlabs';
try {
const audio = await client.textToSpeech.convert(
"invalid_voice_id", // This will cause an error
{ text: "Test" }
);
} catch (error) {
if (error instanceof ElevenLabsError) {
console.error('API Error Details:');
console.error('- Status Code:', error.statusCode);
console.error('- Message:', error.message);
console.error('- Body:', error.body);
} else {
console.error('Unexpected error:', error);
}
}

// Complete error handling for different scenarios
// Complete error handling for different scenarios.
// Runs `operation`, logging a human-readable diagnosis for each documented
// ElevenLabs failure mode; returns the result, or null on any failure.
// (Emoji in the log strings were mojibake in the source and have been
// reconstructed from context.)
async function handleElevenLabsOperation<T>(
  operation: () => Promise<T>
): Promise<T | null> {
  try {
    return await operation();
  } catch (error) {
    if (error instanceof ElevenLabsTimeoutError) {
      console.error('⏱️ Request timed out:', error.message);
      // Implement retry logic
      return null;
    } else if (error instanceof ElevenLabsError) {
      console.error('🚨 API Error:');
      switch (error.statusCode) {
        case 400:
          console.error('❌ Bad Request - Invalid parameters:', error.body);
          break;
        case 401:
          console.error('🔐 Unauthorized - Check your API key');
          console.error('Set ELEVENLABS_API_KEY environment variable or pass apiKey to client');
          break;
        case 403:
          console.error('🚫 Forbidden - Insufficient permissions or quota exceeded');
          break;
        case 404:
          console.error('❌ Not Found - Resource does not exist:', error.body);
          break;
        case 413:
          console.error('📦 Payload Too Large - File or request too big');
          break;
        case 422:
          console.error('⚠️ Validation Error - Invalid input data:', error.body);
          break;
        case 429:
          console.error('🚦 Rate Limit Exceeded - Too many requests');
          console.error('Implement exponential backoff retry strategy');
          break;
        case 500:
          console.error('🔥 Internal Server Error - Try again later');
          break;
        default:
          console.error(`❓ Unexpected Status ${error.statusCode}:`, error.message);
          console.error('Response Body:', error.body);
      }
      return null;
    } else {
      console.error('💥 Unexpected Error:', error);
      return null;
    }
  }
}
// Usage examples
const audio = await handleElevenLabsOperation(() =>
client.textToSpeech.convert(voiceId, { text: "Test" })
);
const voices = await handleElevenLabsOperation(() =>
client.voices.getAll()
);

Specific error class for request timeouts.
interface ElevenLabsTimeoutError extends Error {
message: string;
}
// Handling timeout errors with retry logic
// Handling timeout errors with retry logic.
// Retries only on ElevenLabsTimeoutError with exponential backoff
// (baseDelay, 2*baseDelay, 4*baseDelay, ...); any other error is rethrown
// immediately. After maxRetries timeouts, the last error is rethrown.
async function robustApiCall<T>(
  operation: () => Promise<T>,
  maxRetries = 3,
  baseDelay = 1000
): Promise<T> {
  // `unknown`, not `Error`: under strict mode the catch variable is unknown,
  // so `lastError = error` would not compile with an `Error` annotation.
  let lastError: unknown;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;
      if (error instanceof ElevenLabsTimeoutError) {
        console.warn(`Attempt ${attempt}/${maxRetries} timed out`);
        if (attempt < maxRetries) {
          const delay = baseDelay * Math.pow(2, attempt - 1); // Exponential backoff
          console.log(`Retrying in ${delay}ms...`);
          await new Promise(resolve => setTimeout(resolve, delay));
        }
      } else {
        // Non-timeout error, don't retry
        throw error;
      }
    }
  }
  // Loop exits only after maxRetries timeouts; lastError is set by then.
  throw lastError;
}
// Usage
const audio = await robustApiCall(() =>
client.textToSpeech.convert(voiceId, { text: "Robust operation" }, {
timeoutInSeconds: 30
})
);

Configuration for different API environments.
import { ElevenLabsEnvironment } from 'elevenlabs';
// Available environments
const environments = {
// Default production environment
production: ElevenLabsEnvironment.Production,
// US-specific endpoints
productionUs: ElevenLabsEnvironment.ProductionUs,
// EU-specific endpoints
productionEu: ElevenLabsEnvironment.ProductionEu
};
console.log('Production:', ElevenLabsEnvironment.Production);
// Output: { base: "https://api.elevenlabs.io", wss: "wss://api.elevenlabs.io" }
// Use different environments
const clientEu = new ElevenLabsClient({
apiKey: "your-api-key",
environment: ElevenLabsEnvironment.ProductionEu
});
const clientUs = new ElevenLabsClient({
apiKey: "your-api-key",
environment: ElevenLabsEnvironment.ProductionUs
});

import * as fs from 'fs';
import { pipeline, Readable } from 'stream';
import { promisify } from 'util';
const pipelineAsync = promisify(pipeline);
// Save audio stream to file
// Save audio stream to file.
// Pipes the stream into a write stream via pipeline() so backpressure and
// cleanup are handled automatically; rethrows after logging so callers can react.
// The `$(unknown)` placeholders in the original log lines were garbled
// `${filename}` template substitutions — restored here.
async function saveAudio(
  audioStream: import('stream').Readable,
  filename: string
): Promise<void> {
  try {
    await pipelineAsync(audioStream, fs.createWriteStream(filename));
    console.log(`✅ Audio saved: ${filename}`);
  } catch (error) {
    console.error(`❌ Failed to save ${filename}:`, error);
    throw error;
  }
}
// Usage
const audio = await client.textToSpeech.convert(voiceId, { text: "Save this audio" });
await saveAudio(audio, 'output.mp3');

// Convert audio stream to buffer for in-memory processing
// Convert audio stream to buffer for in-memory processing.
// Collects every 'data' chunk and concatenates once on 'end'; rejects on
// stream error. Parameter typed via Node's 'stream' module (the SDK's
// `stream` export is a function, so `stream.Readable` was not a valid type).
async function audioStreamToBuffer(audioStream: import('stream').Readable): Promise<Buffer> {
  const chunks: Buffer[] = [];
  return new Promise((resolve, reject) => {
    audioStream.on('data', (chunk: Buffer) => {
      chunks.push(chunk);
    });
    audioStream.on('end', () => {
      resolve(Buffer.concat(chunks));
    });
    audioStream.on('error', (error) => {
      reject(error);
    });
  });
}
// Usage
const audio = await client.textToSpeech.convert(voiceId, { text: "Buffer this" });
const audioBuffer = await audioStreamToBuffer(audio);
console.log(`Audio buffer size: ${audioBuffer.length} bytes`);
// Convert buffer back to readable stream
// Convert buffer back to readable stream.
// Uses Node's Readable (imported from 'stream'): the SDK's `stream` export
// is the mpv playback function, so the original `stream.Readable.from(...)`
// would fail at runtime.
function bufferToStream(buffer: Buffer): Readable {
  return Readable.from(buffer);
}
const newStream = bufferToStream(audioBuffer);
await play(newStream);

// Detect audio format from buffer
// Detect audio format from a buffer by inspecting its magic-number signature.
// Recognizes mp3 (frame-sync bits), wav (RIFF/WAVE), flac, and ogg; anything
// else — including buffers too short to hold a signature — is 'unknown'.
function detectAudioFormat(buffer: Buffer): string {
  // A signature needs at least four bytes.
  if (buffer.length < 4) {
    return 'unknown';
  }

  const ascii = (from: number, to: number): string =>
    buffer.subarray(from, to).toString();
  const head = ascii(0, 4);

  // Container formats identified by an ASCII tag at offset 0.
  if (head === 'RIFF' && ascii(8, 12) === 'WAVE') {
    return 'wav';
  }
  if (head === 'fLaC') {
    return 'flac';
  }
  if (head === 'OggS') {
    return 'ogg';
  }

  // MP3: 11 set frame-sync bits (0xFF followed by top three bits of byte 1).
  const syncByte = buffer[0];
  const versionByte = buffer[1];
  if (syncByte === 0xff && (versionByte & 0xe0) === 0xe0) {
    return 'mp3';
  }

  return 'unknown';
}
// Usage
const audio = await client.textToSpeech.convert(voiceId, {
text: "Format detection test",
output_format: "mp3_44100_128"
});
const buffer = await audioStreamToBuffer(audio);
const format = detectAudioFormat(buffer);
console.log(`Detected format: ${format}`);

// Log API requests for debugging
// Log API requests for debugging.
// NOTE(review): `async textToSpeech()` declares a *method*, but the usage
// below (`loggingClient.textToSpeech.convert(...)`) accesses it as a
// *property* — as written, that expression yields the method function itself,
// not the Proxy. It likely needs to be a getter (`get textToSpeech()`), and
// whether a prototype getter can shadow the base client's own property
// depends on how ElevenLabsClient defines it — confirm against the SDK source.
class LoggingClient extends ElevenLabsClient {
constructor(options: any = {}) {
super(options);
}
// Override methods to add logging
// Wraps the underlying textToSpeech namespace in a Proxy that logs each
// call's name, timestamp, and wall-clock duration, then forwards the call.
async textToSpeech() {
const result = super.textToSpeech;
return new Proxy(result, {
get: (target, prop) => {
const originalMethod = target[prop];
if (typeof originalMethod === 'function') {
// Replace function-valued members with a timing/logging wrapper.
return async function(...args: any[]) {
console.log(`π TTS API Call: ${prop}`, {
timestamp: new Date().toISOString(),
arguments: args.length
});
const startTime = Date.now();
try {
const result = await originalMethod.apply(target, args);
const duration = Date.now() - startTime;
console.log(`β TTS Success: ${prop} (${duration}ms)`);
return result;
} catch (error) {
const duration = Date.now() - startTime;
// NOTE(review): `error` is `unknown` under strict mode; `.message` needs narrowing.
console.error(`β TTS Error: ${prop} (${duration}ms)`, error.message);
throw error;
}
};
}
// Non-function members (properties) pass through untouched.
return originalMethod;
}
});
}
}
// Usage
const loggingClient = new LoggingClient();
const audio = await loggingClient.textToSpeech.convert(voiceId, { text: "Logged request" });

// Monitor API performance
// Monitor API performance.
// Records per-operation latencies and derives summary statistics on demand.
class PerformanceMonitor {
  // Operation name -> list of observed durations (ms), in insertion order.
  private metrics: Map<string, number[]> = new Map();

  // Time `operation`, record its duration under `name`, and pass the result
  // (or error) through unchanged. Failed operations are logged but their
  // durations are not recorded, matching the original behavior.
  async measureOperation<T>(
    name: string,
    operation: () => Promise<T>
  ): Promise<T> {
    const startTime = Date.now();
    try {
      const result = await operation();
      const duration = Date.now() - startTime;
      this.recordMetric(name, duration);
      console.log(`📊 ${name}: ${duration}ms`);
      return result;
    } catch (error) {
      const duration = Date.now() - startTime;
      console.error(`📊 ${name} failed: ${duration}ms`);
      throw error;
    }
  }

  // Append one duration sample for `name`, creating the list on first use.
  private recordMetric(name: string, duration: number) {
    if (!this.metrics.has(name)) {
      this.metrics.set(name, []);
    }
    this.metrics.get(name)!.push(duration);
  }

  // Summary stats for one operation, or null if nothing was recorded.
  getStats(name: string) {
    const durations = this.metrics.get(name) || [];
    if (durations.length === 0) return null;
    // Sort a copy: Array.sort mutates in place, and the stored samples must
    // keep their insertion order (the original sorted the live array).
    const sorted = [...durations].sort((a, b) => a - b);
    const sum = durations.reduce((a, b) => a + b, 0);
    return {
      count: durations.length,
      min: sorted[0],
      max: sorted[sorted.length - 1],
      avg: Math.round(sum / durations.length),
      median: sorted[Math.floor(sorted.length / 2)],
      p95: sorted[Math.floor(sorted.length * 0.95)]
    };
  }

  // Stats for every recorded operation, keyed by name.
  getAllStats() {
    const stats: Record<string, any> = {};
    for (const name of this.metrics.keys()) {
      stats[name] = this.getStats(name);
    }
    return stats;
  }
}
// Usage
const monitor = new PerformanceMonitor();
const audio = await monitor.measureOperation('tts-convert', () =>
client.textToSpeech.convert(voiceId, { text: "Performance test" })
);
const voices = await monitor.measureOperation('voices-list', () =>
client.voices.getAll()
);
console.log('Performance stats:', monitor.getAllStats());

// Collection of helpful utilities
// Collection of helpful utilities layered over an ElevenLabsClient instance.
export class ElevenLabsUtils {
  constructor(private readonly client: ElevenLabsClient) {}

  /** Generate audio for `text` with the given voice and play it right away. */
  async speakText(voiceId: string, text: string): Promise<void> {
    const rendered = await this.client.textToSpeech.convert(voiceId, { text });
    await play(rendered);
  }

  /** Generate audio for `text` and write it straight to `filename`. */
  async generateToFile(
    voiceId: string,
    text: string,
    filename: string
  ): Promise<void> {
    const rendered = await this.client.textToSpeech.convert(voiceId, { text });
    await saveAudio(rendered, filename);
  }

  /** Case-insensitive lookup of a voice ID by display name; null if absent. */
  async findVoiceByName(name: string): Promise<string | null> {
    const wanted = name.toLowerCase();
    const { voices } = await this.client.voices.getAll();
    const match = voices.find(v => v.name?.toLowerCase() === wanted);
    return match?.voice_id || null;
  }

  /** Fetch character usage statistics; null (with a warning) on failure. */
  async getUsageStats(): Promise<any> {
    try {
      return await this.client.usage.getUsage();
    } catch (error) {
      console.warn('Could not fetch usage statistics');
      return null;
    }
  }

  /** Speak a short sample so the voice can be judged by ear. */
  async testVoice(voiceId: string, sampleText?: string): Promise<void> {
    await this.speakText(
      voiceId,
      sampleText || "This is a test of the voice quality and characteristics."
    );
  }
}
// Usage
const utils = new ElevenLabsUtils(client);
// Find and test a voice
const sarahVoiceId = await utils.findVoiceByName("Sarah");
if (sarahVoiceId) {
await utils.testVoice(sarahVoiceId);
await utils.generateToFile(sarahVoiceId, "Hello world!", "sarah_hello.mp3");
}

stream() only works in Node.js environments