Access conversation analytics and metrics with LLM usage tracking.
import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
const client = new ElevenLabsClient({ apiKey: "your-api-key" });
// Access this API via: client.conversationalAi.analytics
/**
 * Get live conversation count
 */
client.conversationalAi.analytics.liveCount.get(
request?: LiveCountGetRequest,
requestOptions?: RequestOptions
): HttpResponsePromise<GetLiveCountResponseModel>;

Track and calculate LLM usage costs for workspace and agents.
/**
* Calculate expected LLM token usage and costs
* Returns a list of LLM models and expected cost based on provided values
*/
client.conversationalAi.llmUsage.calculate(
request: LlmUsageCalculatorPublicRequestModel,
requestOptions?: RequestOptions
): HttpResponsePromise<LlmUsageCalculatorResponseModel>;
/**
* Calculate expected LLM tokens for a specific agent
* Calculates expected number of LLM tokens needed for the specified agent
*/
client.conversationalAi.agents.llmUsage.calculate(
agent_id: string,
request?: LlmUsageCalculatorRequestModel,
requestOptions?: RequestOptions
): HttpResponsePromise<LlmUsageCalculatorResponseModel>;
/**
* Get agent share link
* Get the current link used to share the agent with others
*/
client.conversationalAi.agents.link.get(
agent_id: string,
requestOptions?: RequestOptions
): HttpResponsePromise<GetAgentLinkResponseModel>;
/**
 * Request payload for the workspace-level LLM usage calculator
 * (client.conversationalAi.llmUsage.calculate).
 */
interface LlmUsageCalculatorPublicRequestModel {
  /** Length of the prompt in characters */
  promptLength: number;
  /** Number of knowledge base pages */
  numberOfPages: number;
  /** Whether RAG is enabled */
  ragEnabled: boolean;
}
/**
 * Request payload for the per-agent LLM usage calculator
 * (client.conversationalAi.agents.llmUsage.calculate).
 * Open-ended shape: keys are forwarded as provided.
 * NOTE(review): exact accepted keys are not shown in this listing — confirm
 * against the SDK's generated types before relying on specific fields.
 */
interface LlmUsageCalculatorRequestModel {
  /** Optional configuration overrides */
  [key: string]: unknown;
}

import { ElevenLabsClient } from "@elevenlabs/elevenlabs-js";
const client = new ElevenLabsClient({ apiKey: "your-api-key" });
// Get live conversation count.
// Fix: the original example read `agent.agent_id` from an undefined `agent`
// variable; use a placeholder ID, matching the "your-api-key" convention above.
const liveCount = await client.conversationalAi.analytics.liveCount.get({
  agent_id: "your-agent-id",
});
console.log("Active conversations:", liveCount.count);