The official Node.js SDK for ElevenLabs, providing comprehensive text-to-speech, voice synthesis, conversational AI, and audio processing capabilities.
```bash
npx @tessl/cli install tessl/npm-elevenlabs--elevenlabs-js@2.24.0
npm install @elevenlabs/elevenlabs-js
# Minimum Node: 18.0.0
```

```typescript
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
const client = new ElevenLabsClient({
apiKey: "YOUR_API_KEY" // or process.env.ELEVENLABS_API_KEY
});
```

```typescript
interface ElevenLabsClient.Options {
apiKey?: string; // Required via option or ELEVENLABS_API_KEY env var
environment?: ElevenLabsEnvironment | string;
baseUrl?: string;
headers?: Record<string, string | null | undefined>;
}
enum ElevenLabsEnvironment {
Production = "https://api.elevenlabs.io",
ProductionUs = "https://api.us.elevenlabs.io",
ProductionEu = "https://api.eu.residency.elevenlabs.io",
ProductionIndia = "https://api.in.residency.elevenlabs.io"
}
```
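The environment option selects one of the endpoints above; a minimal sketch, assuming ElevenLabsEnvironment is exported from the package root:

```typescript
import { ElevenLabsClient, ElevenLabsEnvironment } from "@elevenlabs/elevenlabs-js";

// Route requests to the EU data-residency endpoint instead of the default production API.
const euClient = new ElevenLabsClient({
  apiKey: process.env.ELEVENLABS_API_KEY,
  environment: ElevenLabsEnvironment.ProductionEu,
});
```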
```typescript
import {
ElevenLabsClient,
play,
stream,
RealtimeConnection,
RealtimeEvents,
AudioFormat,
CommitStrategy,
type ElevenLabs
} from "@elevenlabs/elevenlabs-js";
// Access types via namespace
const voice: ElevenLabs.Voice = ...;
const config: ElevenLabs.AgentConfig = ...;
```

Import limitations: Message types (Config, InputAudioChunk, WordsItem, etc.) and specific error types (ErrorMessage, AuthErrorMessage, QuotaExceededErrorMessage) are not exported from the package root.
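Because those message types are not exported, a handler can narrow the payload structurally to the fields it actually reads. A minimal sketch, assuming the committed-transcript payload carries a `text` field as in the quick example further down:

```typescript
import { ElevenLabsClient, RealtimeEvents, AudioFormat } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

const connection = await client.speechToText.realtime.connect({
  audioFormat: AudioFormat.PCM_16000,
  language: "en",
});

connection.on(RealtimeEvents.COMMITTED_TRANSCRIPT, (msg) => {
  // The concrete message type is not exported, so narrow it structurally.
  const { text } = msg as { text: string };
  console.log(text);
});
```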
```typescript
class ElevenLabsClient {
readonly history: History;
readonly textToSpeech: TextToSpeech;
readonly textToDialogue: TextToDialogue;
readonly textToSoundEffects: TextToSoundEffects;
readonly textToVoice: TextToVoice;
readonly speechToSpeech: SpeechToSpeech;
readonly speechToText: SpeechToText; // SpeechToText wrapper exposing .realtime
readonly voices: Voices;
readonly audioIsolation: AudioIsolation;
readonly forcedAlignment: ForcedAlignment;
readonly conversationalAi: ConversationalAi;
readonly music: Music; // Music wrapper with multipart support
readonly dubbing: Dubbing;
readonly studio: Studio;
readonly models: Models;
readonly user: User;
readonly workspace: Workspace;
readonly usage: Usage;
readonly samples: Samples;
readonly audioNative: AudioNative;
readonly pronunciationDictionaries: PronunciationDictionaries;
readonly serviceAccounts: ServiceAccounts;
readonly webhooks: Webhooks;
readonly tokens: Tokens;
}
```

```typescript
interface RequestOptions {
timeoutInSeconds?: number; // Default: 60
maxRetries?: number; // Default: 2
abortSignal?: AbortSignal;
apiKey?: string; // Override client API key
queryParams?: Record<string, unknown>;
headers?: Record<string, string | null | undefined>;
}
```

Auto-retry applies to: 408 (timeout), 409 (conflict), 429 (rate limit), and 5XX (server errors).
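Per-request options go in the last argument of client methods, as the examples below also show; a minimal sketch combining a timeout override, a retry override, and an abort signal:

```typescript
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

// The caller can cancel the in-flight request via controller.abort().
const controller = new AbortController();

const audio = await client.textToSpeech.convert(
  "voiceId",
  { text: "Hello", modelId: "eleven_multilingual_v2" },
  {
    timeoutInSeconds: 30, // override the 60-second default for this call only
    maxRetries: 1,        // fewer automatic retries than the default of 2
    abortSignal: controller.signal,
  }
);
```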
```typescript
// Basic TTS
const audio = await client.textToSpeech.convert("voiceId", {
text: "Hello",
modelId: "eleven_multilingual_v2"
});
// Streaming TTS
const stream = await client.textToSpeech.stream("voiceId", {
text: "Hello",
modelId: "eleven_flash_v2_5"
});
// Voice cloning
const voice = await client.voices.add({
name: "Custom Voice",
files: [audioFile1, audioFile2]
});
// Realtime transcription
const connection = await client.speechToText.realtime.connect({
audioFormat: AudioFormat.PCM_16000,
language: "en"
});
connection.on(RealtimeEvents.COMMITTED_TRANSCRIPT, (msg) => {
console.log(msg.text);
});
connection.send({ audioBase64: base64Audio });
```

```typescript
import { ElevenLabsClient, ElevenLabsError } from "@elevenlabs/elevenlabs-js";
import * as fs from "fs";
const client = new ElevenLabsClient({
apiKey: process.env.ELEVENLABS_API_KEY
});
async function generateAudiobook(text: string, outputPath: string) {
try {
// Get available voices
const voices = await client.voices.getAll();
const narratorVoice = voices.voices.find(v =>
v.name?.toLowerCase().includes("narrator")
);
if (!narratorVoice) {
throw new Error("Narrator voice not found");
}
// Split long text into chunks (respecting max length)
const maxLength = 5000; // Conservative limit
const chunks = text.match(new RegExp(`.{1,${maxLength}}(?=\\s|$)`, 'g')) || [];
const audioChunks: Buffer[] = [];
// Generate audio for each chunk with continuity
for (let i = 0; i < chunks.length; i++) {
const chunk = chunks[i];
const previousText = i > 0 ? chunks[i - 1].slice(-200) : undefined;
const nextText = i < chunks.length - 1 ? chunks[i + 1].slice(0, 200) : undefined;
const audioStream = await client.textToSpeech.convert(
narratorVoice.voiceId,
{
text: chunk,
modelId: "eleven_multilingual_v2",
previousText,
nextText,
voiceSettings: {
stability: 0.7, // Higher stability for consistent narration
similarityBoost: 0.75,
useSpeakerBoost: true
},
outputFormat: "mp3_44100_128"
},
{
timeoutInSeconds: 120, // Longer timeout for longer text
maxRetries: 3
}
);
// Collect audio chunks
const reader = audioStream.getReader();
// Use a distinct name so the outer text `chunks` array is not shadowed
const streamChunks: Uint8Array[] = [];
while (true) {
const { done, value } = await reader.read();
if (done) break;
streamChunks.push(value);
}
audioChunks.push(Buffer.concat(streamChunks));
// Small delay to avoid rate limiting
await new Promise(resolve => setTimeout(resolve, 100));
}
// Combine all chunks
const finalAudio = Buffer.concat(audioChunks);
fs.writeFileSync(outputPath, finalAudio);
console.log(`Audiobook saved to ${outputPath}`);
} catch (error) {
if (error instanceof ElevenLabsError) {
if (error.statusCode === 429) {
console.error("Rate limit exceeded. Consider implementing exponential backoff.");
} else if (error.statusCode === 422) {
console.error("Invalid request:", error.body);
} else {
console.error(`API error ${error.statusCode}:`, error.message);
}
} else {
console.error("Unexpected error:", error);
}
throw error;
}
}
```

```typescript
import { createReadStream } from "fs";
import * as fs from "fs";
import { ElevenLabsClient, ElevenLabsError } from "@elevenlabs/elevenlabs-js";
async function createHighQualityVoice(name: string, samplePaths: string[]) {
const client = new ElevenLabsClient({
apiKey: process.env.ELEVENLABS_API_KEY
});
try {
// Validate samples before uploading
const samples = samplePaths.map(path => {
const stats = fs.statSync(path);
if (stats.size > 10 * 1024 * 1024) { // 10MB limit
throw new Error(`Sample ${path} exceeds 10MB limit`);
}
return createReadStream(path);
});
if (samples.length < 1 || samples.length > 25) {
throw new Error("Need 1-25 samples for voice cloning");
}
// Create voice with noise removal for better quality
const voice = await client.voices.ivc.create({
name,
files: samples,
removeBackgroundNoise: true, // Only if samples have background noise
description: `High-quality voice clone created programmatically`,
labels: JSON.stringify({
created_via: "sdk",
sample_count: samples.length,
created_at: new Date().toISOString()
})
});
// Test the voice with a sample generation
const testAudio = await client.textToSpeech.convert(voice.voiceId, {
text: "This is a test of the cloned voice quality.",
modelId: "eleven_multilingual_v2"
});
// Verify audio was generated
const reader = testAudio.getReader();
const { done, value } = await reader.read();
if (done || !value || value.length === 0) {
throw new Error("Voice test failed - no audio generated");
}
console.log(`Voice ${voice.voiceId} created and tested successfully`);
return voice;
} catch (error) {
if (error instanceof ElevenLabsError) {
if (error.statusCode === 400) {
console.error("Invalid sample format or quality");
} else if (error.statusCode === 413) {
console.error("Sample files too large");
}
}
throw error;
}
}
```

```typescript
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

async function streamWithProgress(voiceId: string, text: string) {
const stream = await client.textToSpeech.stream(voiceId, {
text,
modelId: "eleven_flash_v2_5",
optimizeStreamingLatency: 3 // Maximum latency optimization
});
let totalBytes = 0;
const chunks: Uint8Array[] = [];
const reader = stream.getReader();
while (true) {
const { done, value } = await reader.read();
if (done) break;
chunks.push(value);
totalBytes += value.length;
console.log(`Received ${totalBytes} bytes so far...`);
}
const audio = Buffer.concat(chunks);
console.log(`Complete audio: ${audio.length} bytes`);
return audio;
}
```

```typescript
class ElevenLabsError extends Error {
readonly statusCode?: number;
readonly body?: unknown;
readonly rawResponse?: RawResponse;
constructor(options: {
message?: string;
statusCode?: number;
body?: unknown;
rawResponse?: RawResponse;
});
}
class ElevenLabsTimeoutError extends ElevenLabsError {
constructor(message: string);
}
interface RawResponse {
statusCode: number;
headers: Record<string, string>;
}
```

The SDK throws ElevenLabsError instances for API errors. While the SDK internally uses specific error types for different HTTP status codes (BadRequestError, UnauthorizedError, ForbiddenError, NotFoundError, UnprocessableEntityError), these are not exported from the package. Catch API errors as ElevenLabsError and inspect the status code or message to determine the specific error type.
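For example, a small helper can map a caught error's status code back to the internal categories listed above (a sketch; the mapping follows standard HTTP status semantics):

```typescript
import { ElevenLabsError } from "@elevenlabs/elevenlabs-js";

// Name the internal error category that corresponds to an ElevenLabsError's status code.
function classifyApiError(error: ElevenLabsError): string {
  switch (error.statusCode) {
    case 400: return "BadRequestError";
    case 401: return "UnauthorizedError";
    case 403: return "ForbiddenError";
    case 404: return "NotFoundError";
    case 422: return "UnprocessableEntityError";
    default: return "ElevenLabsError";
  }
}
```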
The SDK automatically retries failed requests (up to 2 times by default) for 408 (timeout), 409 (conflict), 429 (rate limit), and 5XX (server error) responses.
```typescript
import { ElevenLabsClient, ElevenLabsError, ElevenLabsTimeoutError } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });
// Basic error handling
try {
const audio = await client.textToSpeech.convert("voiceId", {
text: "Hello world",
modelId: "eleven_multilingual_v2"
});
} catch (error) {
if (error instanceof ElevenLabsError) {
console.error(`API error ${error.statusCode}: ${error.message}`);
// Handle specific status codes
if (error.statusCode === 401) {
console.error("Invalid API key");
} else if (error.statusCode === 429) {
console.error("Rate limit exceeded - automatic retries were exhausted");
} else if (error.statusCode === 422) {
console.error("Validation error:", error.body);
}
} else {
// Non-API errors (network, etc.)
console.error("Unexpected error:", error);
}
}
// Timeout handling
try {
const audio = await client.textToSpeech.convert("voiceId", {
text: "Very long text...",
}, {
timeoutInSeconds: 30
});
} catch (error) {
if (error instanceof ElevenLabsTimeoutError) {
console.error("Request timed out - consider using streaming for long text");
}
}
// Retry configuration for critical operations
try {
const voice = await client.voices.get("voiceId", {
maxRetries: 5, // Increase retries for critical operations
timeoutInSeconds: 120
});
} catch (error) {
if (error instanceof ElevenLabsError) {
// Log full error details for debugging
console.error("Full error details:", {
statusCode: error.statusCode,
message: error.message,
body: error.body,
headers: error.rawResponse?.headers
});
}
}
```

```typescript
function play(audio: AsyncIterable<Uint8Array>): Promise<void>; // Requires ffplay
function stream(audio: ReadableStream<Uint8Array>): Promise<void>; // Requires mpv
```
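A minimal usage sketch for the play() helper, assuming ffplay is installed and on the PATH:

```typescript
import { ElevenLabsClient, play } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

// Generate speech and play it through the local ffplay binary.
const audio = await client.textToSpeech.convert("voiceId", {
  text: "Hello from the play helper.",
  modelId: "eleven_multilingual_v2",
});
await play(audio);
```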
```typescript
interface VoiceSettings {
stability?: number; // 0-1, voice consistency
similarityBoost?: number; // 0-1, voice similarity to original
style?: number; // 0-1, exaggeration level
useSpeakerBoost?: boolean; // Boost speaker similarity (increases latency)
speed?: number; // 1.0 = default, <1 = slower, >1 = faster
}
interface PronunciationDictionaryLocator {
pronunciationDictionaryId: string;
versionId?: string; // Latest version if omitted
}
```
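A sketch of attaching a pronunciation dictionary to a TTS request; the pronunciationDictionaryLocators request field name is an assumption based on the locator type above:

```typescript
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

// Apply a previously created pronunciation dictionary to this conversion.
// The `pronunciationDictionaryLocators` field name is assumed here.
const audio = await client.textToSpeech.convert("voiceId", {
  text: "Acronyms are read the way the dictionary specifies.",
  modelId: "eleven_multilingual_v2",
  pronunciationDictionaryLocators: [
    { pronunciationDictionaryId: "dictionaryId", versionId: "versionId" },
  ],
});
```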
```typescript
// Model information
interface Model {
modelId: string;
name: string;
canBeFinetuned: boolean;
canDoTextToSpeech: boolean;
canDoVoiceConversion: boolean;
canUseStyle: boolean;
canUseSpeakerBoost: boolean;
servesProVoices: boolean;
tokenCostFactor: number;
description?: string;
requiresAlphaAccess?: boolean;
maxCharactersRequestFreeUser?: number;
maxCharactersRequestSubscribedUser?: number;
maximumTextLengthPerRequest?: number;
languages?: Language[];
}
interface Language {
languageId: string;
name: string;
}
```
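A sketch of filtering models by capability; the client.models.list() method name is an assumption and may differ in the SDK:

```typescript
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";

const client = new ElevenLabsClient({ apiKey: process.env.ELEVENLABS_API_KEY });

// Fetch available models and keep those usable for text to speech.
// `client.models.list()` is assumed; adjust to the SDK's actual models method.
const models = await client.models.list();
const ttsModels = models.filter((m) => m.canDoTextToSpeech);
console.log(ttsModels.map((m) => `${m.modelId} (${m.name})`));
```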
```typescript
// File upload type
type Uploadable = File | Blob | ReadableStream;
// Output format options
type OutputFormat =
| "mp3_22050_32" | "mp3_24000_48" | "mp3_44100_32" | "mp3_44100_64"
| "mp3_44100_96" | "mp3_44100_128" | "mp3_44100_192" // 192 requires Creator+
| "pcm_8000" | "pcm_16000" | "pcm_22050" | "pcm_24000"
| "pcm_32000" | "pcm_44100" | "pcm_48000" // 44.1kHz+ requires Pro+
| "ulaw_8000" // For Twilio
| "alaw_8000"
| "opus_48000_32" | "opus_48000_64" | "opus_48000_96"
| "opus_48000_128" | "opus_48000_192";
```

Runtime compatibility: Node.js 18+, Vercel Edge, Cloudflare Workers, Deno v1.25+, Bun 1.0+
Dependencies:
- node-fetch - HTTP client (Node.js only)
- ws - WebSocket client
- command-exists - System command detection