List and explore available AI models for voice synthesis. Different models offer various capabilities including multilingual support, streaming optimization, and quality levels.
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
const client = new ElevenLabsClient({ apiKey: "your-api-key" });
// Access this API via: client.models.list() — gets the list of all available models.
/**
 * Retrieves the list of all models available to the client.
 *
 * @param requestOptions - Optional request configuration
 * @returns Array of available models
 * @throws UnprocessableEntityError if request fails
 */
client.models.list(
requestOptions?: RequestOptions
): HttpResponsePromise<Model[]>;
// Shape of a single model entry returned by client.models.list().
interface Model {
/** Model ID */
model_id: string;
/** Model name */
name: string;
/** Whether model can be fine-tuned */
can_be_finetuned: boolean;
/** Whether model can do text-to-speech */
can_do_text_to_speech: boolean;
/** Whether model can do voice conversion */
can_do_voice_conversion: boolean;
/** Whether model supports speaker boost */
can_use_speaker_boost: boolean;
/** Whether model supports style parameter */
can_use_style: boolean;
/** Supported languages */
languages: Language[];
/** Maximum characters per request (free tier) */
max_characters_request_free_user?: number;
/** Maximum characters per request (subscribed) */
max_characters_request_subscribed_user?: number;
/** Whether the model serves "pro" voices — inferred from the field name;
 *  NOTE(review): the original comment said "Token cost multiplier", which
 *  did not match this boolean field — confirm against the API docs. */
serves_pro_voices?: boolean;
/** Description of model capabilities */
description?: string;
}
/** A single language entry attached to a model. */
interface Language {
  /** Language code (e.g., "en", "es") */
  language_id: string;
  /** Language name */
  name: string;
}

import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
const client = new ElevenLabsClient({ apiKey: "your-api-key" });
// Get all available models
const models = await client.models.list();
console.log(`Found ${models.length} models:\n`);

// Print a capability summary line-by-line for every model.
models.forEach((model) => {
  console.log(`${model.name} (${model.model_id})`);
  console.log(` Description: ${model.description || "N/A"}`);
  console.log(` Text-to-Speech: ${model.can_do_text_to_speech}`);
  console.log(` Voice Conversion: ${model.can_do_voice_conversion}`);
  console.log(` Speaker Boost: ${model.can_use_speaker_boost}`);
  console.log(` Style Control: ${model.can_use_style}`);
  console.log(` Languages: ${model.languages.length}`);
  console.log();
});
// Find models that support multiple languages
const models = await client.models.list();
// A model is "multilingual" when it lists more than one language.
const multilingualModels = models.filter(
  (model) => model.languages.length > 1,
);
console.log("Multilingual models:");
multilingualModels.forEach((model) => {
  console.log(`${model.name}:`);
  console.log(` Supports ${model.languages.length} languages`);
  console.log(` Languages: ${model.languages.map(l => l.name).join(", ")}`);
});
// Find models that support style control
const models = await client.models.list();
// Keep only models exposing the style parameter.
const styleModels = models.filter((model) => model.can_use_style);
console.log("Models with style control:");
styleModels.forEach((model) => {
  console.log(`- ${model.name} (${model.model_id})`);
});
// Check which models support a specific language
/**
 * Find every model whose language list contains the given code.
 * @param languageCode - Language code to search for (e.g. "es")
 * @returns Models supporting that language
 */
async function findModelsForLanguage(languageCode: string): Promise<Model[]> {
  const models = await client.models.list();
  const matches: Model[] = [];
  for (const model of models) {
    const supported = model.languages.some(
      (lang) => lang.language_id === languageCode,
    );
    if (supported) {
      matches.push(model);
    }
  }
  return matches;
}
const spanishModels = await findModelsForLanguage("es");
console.log(`Models supporting Spanish: ${spanishModels.length}`);
// Compare model capabilities
/**
 * Print a table comparing the core capabilities of every available model.
 * Columns: name, text-to-speech, voice conversion, speaker boost,
 * style control, and supported-language count.
 */
async function compareModels(): Promise<void> {
  const models = await client.models.list();
  console.log("Model Comparison:");
  // Fixed column widths keep header, separator, and data rows aligned.
  // (The original header and separator did not match the padEnd(20)-padded
  // data rows, and the separator had inconsistent dash counts.)
  const widths = [20, 3, 2, 5, 5, 9];
  const row = (cells: string[]): string =>
    cells.map((cell, i) => cell.padEnd(widths[i] ?? 0)).join(" | ");
  console.log(row(["Name", "TTS", "VC", "Boost", "Style", "Languages"]));
  console.log(widths.map((w) => "-".repeat(w)).join("-|-"));
  for (const model of models) {
    console.log(
      row([
        model.name,
        model.can_do_text_to_speech ? "✓" : "✗",
        model.can_do_voice_conversion ? "✓" : "✗",
        model.can_use_speaker_boost ? "✓" : "✗",
        model.can_use_style ? "✓" : "✗",
        String(model.languages.length),
      ]),
    );
  }
}
await compareModels();
// Find specific model by ID
/**
 * Look up a model by its ID.
 * @param modelId - The model_id to search for
 * @returns The matching model, or undefined when none matches
 */
async function getModelById(modelId: string): Promise<Model | undefined> {
  const models = await client.models.list();
  for (const candidate of models) {
    if (candidate.model_id === modelId) {
      return candidate;
    }
  }
  return undefined;
}
const turboModel = await getModelById("eleven_turbo_v2_5");
if (turboModel) {
  console.log("Found:", turboModel.name);
  console.log("Description:", turboModel.description);
}
// Select model based on requirements
// Filter criteria consumed by selectModel; every field is optional and
// only constrains the result when set.
interface ModelRequirements {
/** Require support for more than one language */
multilingual?: boolean;
/** Require voice-conversion capability */
voiceConversion?: boolean;
/** Require support for the style parameter */
styleControl?: boolean;
/** Require speaker boost */
speakerBoost?: boolean;
/** Require support for this specific language code */
language?: string;
}
/**
 * Filter the available models down to those meeting every requirement.
 * Unset fields in `requirements` impose no constraint.
 * @param requirements - Capability/language constraints to apply
 * @returns Models satisfying all requested constraints
 */
async function selectModel(requirements: ModelRequirements): Promise<Model[]> {
  const models = await client.models.list();
  // Each check passes when its requirement is unset or the model meets it.
  const checks: Array<(model: Model) => boolean> = [
    (model) => !requirements.multilingual || model.languages.length > 1,
    (model) => !requirements.voiceConversion || model.can_do_voice_conversion,
    (model) => !requirements.styleControl || model.can_use_style,
    (model) => !requirements.speakerBoost || model.can_use_speaker_boost,
    (model) =>
      !requirements.language ||
      model.languages.some(
        (lang) => lang.language_id === requirements.language,
      ),
  ];
  return models.filter((model) => checks.every((check) => check(model)));
}
// Find models for multilingual TTS with style control
const suitableModels = await selectModel({
  multilingual: true,
  styleControl: true,
  language: "en",
});
console.log("Suitable models:");
for (const model of suitableModels) {
  console.log(`- ${model.name}`);
}
// Check character limits for models
const models = await client.models.list();
console.log("Model Character Limits:");
// Only models that report a subscribed-tier limit are printed.
models.forEach((model) => {
  if (model.max_characters_request_subscribed_user) {
    console.log(`${model.name}:`);
    console.log(` Free: ${model.max_characters_request_free_user || "N/A"}`);
    console.log(` Subscribed: ${model.max_characters_request_subscribed_user}`);
  }
});
// Group models by their capabilities
async function groupModelsByCapability(): Promise<void> {
const models = await client.models.list();
const groups = {
tts: models.filter(m => m.can_do_text_to_speech),
voiceConversion: models.filter(m => m.can_do_voice_conversion),
multilingual: models.filter(m => m.languages.length > 1),
style: models.filter(m => m.can_use_style),
speakerBoost: models.filter(m => m.can_use_speaker_boost),
};
console.log("Models by Capability:");
console.log(`Text-to-Speech: ${groups.tts.length}`);
console.log(`Voice Conversion: ${groups.voiceConversion.length}`);
console.log(`Multilingual: ${groups.multilingual.length}`);
console.log(`Style Control: ${groups.style.length}`);
console.log(`Speaker Boost: ${groups.speakerBoost.length}`);
}
await groupModelsByCapability();// Get all unique languages supported across models
/**
 * Collect every distinct language supported by at least one model.
 * The first Language object seen for each language_id wins.
 * @returns Deduplicated list of languages across all models
 */
async function getAllSupportedLanguages(): Promise<Language[]> {
  const models = await client.models.list();
  const byId = new Map<string, Language>();
  // flatMap flattens each model's language list into one stream.
  for (const lang of models.flatMap((model) => model.languages)) {
    if (!byId.has(lang.language_id)) {
      byId.set(lang.language_id, lang);
    }
  }
  return [...byId.values()];
}
const languages = await getAllSupportedLanguages();
console.log(`Total supported languages: ${languages.length}`);
console.log(languages.map(l => `${l.name} (${l.language_id})`).join(", "));
// Helper to select appropriate model for TTS
/**
 * Pick the first text-to-speech model that satisfies the given options.
 * @param options - Optional constraints: language code, style support,
 *                  multilingual support
 * @returns The model_id of the first matching model
 * @throws Error when no model satisfies all constraints
 */
async function selectModelForTTS(options: {
  language?: string;
  style?: boolean;
  multilingual?: boolean;
}): Promise<string> {
  const models = await client.models.list();
  // Filter by requirements: TTS is mandatory, the rest only apply when set.
  const candidates = models.filter(
    (m) =>
      m.can_do_text_to_speech &&
      (!options.style || m.can_use_style) &&
      (!options.multilingual || m.languages.length > 1) &&
      (!options.language ||
        m.languages.some((l) => l.language_id === options.language)),
  );
  if (candidates.length === 0) {
    throw new Error("No model found matching requirements");
  }
  // Return first matching model
  return candidates[0].model_id;
}
// Example usage
const modelId = await selectModelForTTS({
  language: "en",
  style: true,
  multilingual: true,
});
console.log("Selected model:", modelId);
// Cache models list to avoid repeated API calls
/** In-memory cache for the models list; avoids repeated API calls. */
class ModelCache {
  private cache: Model[] | null = null;
  private cacheTime = 0;
  private readonly cacheDuration = 3600000; // 1 hour in milliseconds

  /** Return the cached list while still fresh; otherwise refetch. */
  async getModels(): Promise<Model[]> {
    const now = Date.now();
    const isFresh =
      this.cache !== null && now - this.cacheTime < this.cacheDuration;
    if (isFresh && this.cache) {
      return this.cache;
    }
    const fetched = await client.models.list();
    this.cache = fetched;
    this.cacheTime = now;
    return fetched;
  }

  /** Invalidate the cache so the next getModels() call refetches. */
  clearCache(): void {
    this.cache = null;
    this.cacheTime = 0;
  }
}
const modelCache = new ModelCache();
// Use cached models
const models = await modelCache.getModels();