Official Node.js SDK for ElevenLabs text-to-speech API with voice synthesis, real-time transcription, music generation, and conversational AI
Overall score: 86% (Evaluation — 86%; ↑1.06x agent success when using this tile)
Access both parsed API responses and raw HTTP responses for advanced use cases. All API methods return HttpResponsePromise which provides transparent access to underlying response data.
import { HttpResponsePromise } from "@elevenlabs/elevenlabs-js";
// All API methods return HttpResponsePromise<T>
All API methods return HttpResponsePromise<T>, which extends Promise<T> with additional functionality for accessing raw HTTP responses.
/**
 * A promise that resolves to the parsed response and also provides
 * access to the raw HTTP response.
 *
 * Declared ambient (`declare class`): the original snippet used a
 * body-less `async` method inside a plain class, which is invalid
 * TypeScript — method signatures without bodies are only legal in
 * ambient declarations, and the `async` modifier is not allowed there.
 */
declare class HttpResponsePromise<T> extends Promise<T> {
  /**
   * Retrieves both the parsed data and the raw HTTP response.
   * @returns Promise resolving to the data and rawResponse pair
   */
  withRawResponse(): Promise<{ data: T; rawResponse: Response }>;
}

import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
const client = new ElevenLabsClient({ apiKey: "your-api-key" });

// Standard usage: awaiting the call directly yields only the parsed data.
const audioStream = await client.textToSpeech.convert("voice-id", {
  text: "Hello, world!",
});

// Consume the synthesized audio one chunk at a time.
for await (const audioChunk of audioStream) {
  // Handle audio chunks
}

// Get both parsed data and raw HTTP response
// Request both the parsed payload and the underlying HTTP response.
const ttsResult = await client.textToSpeech
  .convert("voice-id", { text: "Hello" })
  .withRawResponse();
const { data: parsedAudio, rawResponse: httpResponse } = ttsResult;

// Inspect standard HTTP response properties.
console.log("Status:", httpResponse.status);
console.log("Status Text:", httpResponse.statusText);
console.log("Headers:", httpResponse.headers);
console.log("URL:", httpResponse.url);

// The parsed audio stream is still consumable as usual.
for await (const piece of parsedAudio) {
  // Handle audio chunks
}

// Inspect response headers
// Inspect response headers on the voices listing call.
const voicesResult = await client.voices.getAll().withRawResponse();
const voiceHeaders = voicesResult.rawResponse.headers;

// Pull out the specific headers of interest.
const rateLimit = voiceHeaders.get("X-RateLimit-Limit");
const remaining = voiceHeaders.get("X-RateLimit-Remaining");
const requestId = voiceHeaders.get("X-Request-ID");
console.log(`Rate limit: ${remaining}/${rateLimit}`);
console.log(`Request ID: ${requestId}`);

// The parsed voice list is available alongside the raw response.
console.log("Voices:", voicesResult.data.voices);
// Check response status code
// Check response status code
const userResult = await client.user.get().withRawResponse();
switch (userResult.rawResponse.status) {
  case 200:
    console.log("User data:", userResult.data);
    break;
  case 304:
    console.log("Not modified - use cached data");
    break;
}
// Verify response content type
// Verify response content type
const { data: mp3Stream, rawResponse: mp3Response } = await client.textToSpeech
  .convert("voice-id", { text: "Hello" })
  .withRawResponse();

const contentType = mp3Response.headers.get("Content-Type");
console.log("Content-Type:", contentType);

// Only treat the stream as MP3 when the server says it is one.
if (contentType !== null && contentType.includes("audio/mpeg")) {
  console.log("Received MP3 audio");
  for await (const mp3Chunk of mp3Stream) {
    // Handle MP3 chunks
  }
}
// Measure response time
// Measure wall-clock time taken by a synthesis request.
const requestStart = Date.now();
const timedResult = await client.textToSpeech
  .convert("voice-id", { text: "Performance test" })
  .withRawResponse();
const duration = Date.now() - requestStart;
console.log(`Request completed in ${duration}ms`);
console.log(`Status: ${timedResult.rawResponse.status}`);
// Check caching headers
// Check caching headers
const voiceDetail = await client.voices.get("voice-id").withRawResponse();
const cacheHeaders = voiceDetail.rawResponse.headers;

const cacheControl = cacheHeaders.get("Cache-Control");
const etag = cacheHeaders.get("ETag");
const expires = cacheHeaders.get("Expires");
console.log("Cache-Control:", cacheControl);
console.log("ETag:", etag);
console.log("Expires:", expires);
// Process response based on headers
// Process response based on headers
/**
 * Generates speech for the given voice and text, returning the audio
 * stream together with all extension ("x-") response headers collected
 * into a metadata record.
 */
async function generateWithMetadata(
  voiceId: string,
  text: string
): Promise<{
  audio: ReadableStream<Uint8Array>;
  metadata: Record<string, string>;
}> {
  const { data, rawResponse } = await client.textToSpeech
    .convert(voiceId, { text })
    .withRawResponse();
  // Keep only extension headers; Headers iteration yields lowercase
  // names per the Fetch spec, so the "x-" prefix check matches reliably.
  const metadata: Record<string, string> = {};
  for (const [headerName, headerValue] of rawResponse.headers) {
    if (headerName.startsWith("x-")) {
      metadata[headerName] = headerValue;
    }
  }
  return { audio: data, metadata };
}

const result = await generateWithMetadata("voice-id", "Hello");
console.log("Audio metadata:", result.metadata);
// Handle different response scenarios
// Handle different response scenarios
const historyResult = await client.history.list().withRawResponse();
const historyStatus = historyResult.rawResponse.status;
if (historyStatus === 200) {
  console.log("History items:", historyResult.data.history);
} else if (historyStatus === 204) {
  console.log("No history items available");
} else {
  console.log("Unexpected status:", historyStatus);
}
// Log response details for debugging
// Log response details for debugging
/**
 * Generates speech while logging the HTTP response details
 * (status, URL, and every header) to the console for debugging.
 */
async function generateWithLogging(
  voiceId: string,
  text: string
): Promise<ReadableStream<Uint8Array>> {
  const { data, rawResponse } = await client.textToSpeech
    .convert(voiceId, { text })
    .withRawResponse();
  // Dump the interesting response fields before handing back the stream.
  console.log("Response Details:");
  console.log(" Status:", rawResponse.status, rawResponse.statusText);
  console.log(" URL:", rawResponse.url);
  console.log(" Headers:");
  for (const [headerName, headerValue] of rawResponse.headers) {
    console.log(` ${headerName}: ${headerValue}`);
  }
  return data;
}
// Analyze error responses
try {
const audio = await client.textToSpeech.convert("invalid-voice", {
text: "Test",
});
} catch (error) {
if (error.rawResponse) {
console.log("Error response status:", error.rawResponse.status);
console.log("Error response headers:");
error.rawResponse.headers.forEach((value, key) => {
console.log(` ${key}: ${value}`);
});
// Check for retry-after header
const retryAfter = error.rawResponse.headers.get("Retry-After");
if (retryAfter) {
console.log(`Retry after ${retryAfter} seconds`);
}
}
}// Track pagination metadata
// Track pagination metadata
/**
 * Fetches up to 100 voices and returns them together with the request ID
 * and server date taken from the response headers ("" when absent).
 */
async function getAllVoicesWithMetadata(): Promise<{
  voices: Voice[];
  requestId: string;
  timestamp: string;
}> {
  const { data, rawResponse } = await client.voices
    .search({ page_size: 100 })
    .withRawResponse();
  const headers = rawResponse.headers;
  return {
    voices: data.voices,
    requestId: headers.get("X-Request-ID") ?? "",
    timestamp: headers.get("Date") ?? "",
  };
}
// Clone raw response for multiple processing
// Clone raw response for multiple processing
const voicesWithRaw = await client.voices.getAll().withRawResponse();

// NOTE(review): per the Fetch spec, Response.clone() throws if the body
// was already consumed — this assumes the SDK leaves the raw body
// readable after parsing; confirm against the SDK before relying on it.
const clonedResponse = voicesWithRaw.rawResponse.clone();

// Process the parsed payload first.
console.log("Voices:", voicesWithRaw.data.voices);

// Read the cloned response body independently of the parsed data.
const bodyText = await clonedResponse.text();
console.log("Raw response body length:", bodyText.length);
// Extract custom headers into structured format
// Extract custom headers into structured format
/** Request-tracking and rate-limit details parsed from response headers. */
interface ResponseMetadata {
  requestId: string;
  rateLimit: {
    limit: number;
    remaining: number;
    reset: number;
  };
  server: string;
}

/**
 * Awaits any SDK call and returns its parsed data together with
 * structured metadata extracted from the HTTP response headers.
 * Missing headers default to "" (strings) or 0 (counters).
 */
async function fetchWithMetadata<T>(
  promise: HttpResponsePromise<T>
): Promise<{ data: T; metadata: ResponseMetadata }> {
  const { data, rawResponse } = await promise.withRawResponse();
  const headers = rawResponse.headers;
  // Always pass an explicit radix to parseInt — the original omitted it.
  const toCount = (name: string): number =>
    parseInt(headers.get(name) ?? "0", 10);
  const metadata: ResponseMetadata = {
    requestId: headers.get("X-Request-ID") ?? "",
    rateLimit: {
      limit: toCount("X-RateLimit-Limit"),
      remaining: toCount("X-RateLimit-Remaining"),
      reset: toCount("X-RateLimit-Reset"),
    },
    server: headers.get("Server") ?? "",
  };
  return { data, metadata };
}

// Usage
const result = await fetchWithMetadata(client.voices.getAll());
console.log("Rate limit:", result.metadata.rateLimit);
console.log("Request ID:", result.metadata.requestId);
// Use withRawResponse() when you need:
// Use withRawResponse() when you need:

// 1. Response headers
const headersCheck = await client.voices.getAll().withRawResponse();
const requestIdHeader = headersCheck.rawResponse.headers.get("X-Request-ID");

// 2. Status code information
const statusCheck = await client.user.get().withRawResponse();
console.log("Status:", statusCheck.rawResponse.status);

// 3. Rate limit tracking
const rateCheck = await client.textToSpeech
  .convert("voice-id", { text: "Hello" })
  .withRawResponse();
const remainingCalls = rateCheck.rawResponse.headers.get("X-RateLimit-Remaining");

// 4. Debugging
const debugCheck = await client.voices.get("voice-id").withRawResponse();
console.log("Response URL:", debugCheck.rawResponse.url);
console.log("Response type:", debugCheck.rawResponse.type);
// For most use cases, direct await is sufficient
// For most use cases awaiting the call directly is all you need.
const allVoices = await client.voices.getAll();
const spokenAudio = await client.textToSpeech.convert("voice-id", { text: "Hello" });
const currentUser = await client.user.get();
// Reach for withRawResponse() only when the HTTP response details matter.