Application Performance Monitoring (APM) agent for Node.js applications with transaction tracing, error tracking, custom metrics, and distributed tracing capabilities.
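The agent instruments an application when it is required before any other module; a minimal sketch of an entry point, assuming configuration via a newrelic.js file or environment variables (the Express app is only an illustration):

// index.js - require the agent first so it can instrument subsequent requires
require('newrelic');

const express = require('express'); // illustrative framework choice
const app = express();

app.get('/health', (req, res) => res.send('ok'));
app.listen(3000);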
Specialized monitoring capabilities for Large Language Model applications, including token counting and feedback tracking.
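The LLM APIs below require AI monitoring to be enabled in the agent configuration (see the recordLlmFeedbackEvent docs); a minimal sketch of the relevant newrelic.js settings, with placeholder values:

// newrelic.js - agent configuration (app name and license key are placeholders)
exports.config = {
  app_name: ['my-llm-app'],
  license_key: process.env.NEW_RELIC_LICENSE_KEY,
  ai_monitoring: {
    enabled: true // required for the LLM monitoring APIs below
  }
};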
Record feedback events for LLM interactions in AI monitoring.
/**
 * Record an LLM feedback event for AI monitoring. Requires ai_monitoring.enabled: true
 * @param {object} params - Feedback parameters
 * @param {string} params.traceId - Trace ID from getTraceMetadata()
 * @param {string} params.category - Feedback category/tag
 * @param {string} params.rating - Feedback rating
 * @param {string} [params.message] - Optional feedback message
 * @param {object} [params.metadata] - Additional metadata
 */
function recordLlmFeedbackEvent(params);

Register a callback for calculating token counts when content recording is disabled.
/**
 * Register a callback for LLM token count calculation
 * @param {Function} callback - Synchronous function(model, content) returning token count
 */
function setLlmTokenCountCallback(callback);

Run a function with LLM-specific custom attributes in context.
/**
 * Execute a function with LLM custom attributes in context
 * @param {object} context - LLM attributes (keys prefixed with 'llm.')
 * @param {Function} callback - Function to execute in context
 * @returns {*} Result of callback execution
 */
function withLlmCustomAttributes(context, callback);

Usage Examples:
const newrelic = require('newrelic');

// Record LLM feedback
const traceMetadata = newrelic.getTraceMetadata();
newrelic.recordLlmFeedbackEvent({
  traceId: traceMetadata.traceId,
  category: 'helpfulness',
  rating: 'positive',
  message: 'Very helpful response',
  metadata: { userId: 'user123', sessionId: 'session456' }
});

// Set token counting callback
newrelic.setLlmTokenCountCallback((model, content) => {
  // Custom token counting logic
  return estimateTokens(model, content);
});

// LLM operation with custom attributes (run inside an async function)
const response = await newrelic.withLlmCustomAttributes({
  'llm.model': 'gpt-4',
  'llm.temperature': 0.7,
  'llm.maxTokens': 1000
}, async () => {
  return await callLLMAPI(prompt);
});
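The estimateTokens helper referenced above is application-defined; a minimal sketch of one possible implementation, using a rough characters-per-token heuristic rather than a model-specific tokenizer (note the callback must be synchronous):

// Hypothetical helper: rough estimate of ~4 characters per token for English
// text. A production implementation would use the model's actual tokenizer.
function estimateTokens(model, content) {
  if (typeof content !== 'string' || content.length === 0) return 0;
  return Math.ceil(content.length / 4);
}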