# LLM & AI Monitoring

Specialized monitoring capabilities for Large Language Model applications including token counting and feedback tracking.

## Capabilities

### Record LLM Feedback Event

Record feedback events for LLM interactions in AI monitoring.

```javascript { .api }
/**
 * Record LLM feedback event for AI monitoring. Requires ai_monitoring.enabled: true
 * @param {object} params - Feedback parameters
 * @param {string} params.traceId - Trace ID from getTraceMetadata()
 * @param {string} params.category - Feedback category/tag
 * @param {string} params.rating - Feedback rating
 * @param {string} [params.message] - Optional feedback message
 * @param {object} [params.metadata] - Additional metadata
 */
function recordLlmFeedbackEvent(params);
```

### Set LLM Token Count Callback

Register callback for calculating token counts when content recording is disabled.

```javascript { .api }
/**
 * Register callback for LLM token count calculation
 * @param {Function} callback - Synchronous function(model, content) returning token count
 */
function setLlmTokenCountCallback(callback);
```

### Execute with LLM Custom Attributes

Run function with LLM-specific custom attributes in context.

```javascript { .api }
/**
 * Execute function with LLM custom attributes context
 * @param {object} context - LLM attributes (keys prefixed with 'llm.')
 * @param {Function} callback - Function to execute in context
 * @returns {*} Result of callback execution
 */
function withLlmCustomAttributes(context, callback);
```

**Usage Examples:**

```javascript
const newrelic = require('newrelic');

// Record LLM feedback
const traceMetadata = newrelic.getTraceMetadata();
newrelic.recordLlmFeedbackEvent({
  traceId: traceMetadata.traceId,
  category: 'helpfulness',
  rating: 'positive',
  message: 'Very helpful response',
  metadata: { userId: 'user123', sessionId: 'session456' }
});

// Set token counting callback
newrelic.setLlmTokenCountCallback((model, content) => {
  // Custom token counting logic
  return estimateTokens(model, content);
});

// LLM operation with custom attributes
const response = await newrelic.withLlmCustomAttributes({
  'llm.model': 'gpt-4',
  'llm.temperature': 0.7,
  'llm.maxTokens': 1000
}, async () => {
  return await callLLMAPI(prompt);
});
```