docs
tessl install tessl/pypi-pipecat-ai@0.0.0

An open source framework for building real-time voice and multimodal conversational AI agents with support for speech-to-text, text-to-speech, LLMs, and multiple transport protocols.
LLM context frames manage conversation context, messages, tools, and LLM execution control.
{ .api }
from pipecat.frames.frames import LLMContextFrame
class LLMContextFrame(Frame):
"""Universal LLM context container.
Carries the complete LLM context including messages,
tools, and settings. This is the modern, provider-agnostic
context representation.
Attributes:
context (LLMContext): The LLM context object
id (int): Frame identifier
name (str): Frame name
pts (Optional[int]): Presentation timestamp
metadata (Dict[str, Any]): Frame metadata
"""
def __init__(self, context: "LLMContext"):
"""Initialize LLM context frame.
Args:
context: LLMContext object with messages, tools, settings
"""
super().__init__()
self.context = context
{ .api }
from pipecat.processors.aggregators.llm_context import LLMContext
class LLMContext:
"""Universal LLM context.
Provider-agnostic container for LLM conversation context.
Supports messages, tools/functions, and provider-specific settings.
Attributes:
messages (List[Dict[str, Any]]): Conversation messages
tools (List[Dict[str, Any]]): Available tools/functions
settings (Dict[str, Any]): LLM settings (temperature, etc.)
"""
def __init__(
self,
messages: Optional[List[Dict[str, Any]]] = None,
tools: Optional[List[Dict[str, Any]]] = None,
settings: Optional[Dict[str, Any]] = None
):
"""Initialize LLM context.
Args:
messages: Initial conversation messages
tools: Available tools/functions
settings: LLM configuration settings
"""
self.messages = messages or []
self.tools = tools or []
self.settings = settings or {}
self.tool_choice = None
def add_message(self, message: Dict[str, Any]):
"""Add message to context.
Args:
message: Message dict with 'role' and 'content'
"""
self.messages.append(message)
def add_messages(self, messages: List[Dict[str, Any]]):
"""Add multiple messages to context.
Args:
messages: List of message dicts
"""
self.messages.extend(messages)
def set_tool_choice(self, tool_choice: str | Dict[str, Any]):
"""Set tool choice strategy.
Args:
tool_choice: Tool choice ("auto", "required", {"type": "function", "function": {"name": "..."}})
"""
self.tool_choice = tool_choice
def get_messages(self) -> List[Dict[str, Any]]:
"""Get all messages.
Returns:
List of messages
"""
return self.messages
def get_tools(self) -> List[Dict[str, Any]]:
"""Get all tools.
Returns:
List of tool definitions
"""
return self.tools
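
The methods above compose naturally. Here is a brief sketch, using only the APIs documented in this section, that builds a context incrementally and wraps it in an LLMContextFrame:

from pipecat.frames.frames import LLMContextFrame
from pipecat.processors.aggregators.llm_context import LLMContext

# Build a context incrementally with the helper methods
context = LLMContext(settings={"temperature": 0.7})
context.add_message({"role": "system", "content": "You are a helpful assistant."})
context.add_messages([
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Hello! How can I help?"},
])
context.set_tool_choice("auto")

# Inspect the accumulated state
print(len(context.get_messages()), "messages;", len(context.get_tools()), "tools")

# Wrap the context in a frame so it can travel through a pipeline
frame = LLMContextFrame(context=context)

{ .api }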
from pipecat.frames.frames import LLMMessagesAppendFrame
class LLMMessagesAppendFrame(DataFrame):
"""Append messages to LLM context.
Adds new messages to the existing context without
replacing the entire context.
Attributes:
messages (List[Dict[str, Any]]): Messages to append
"""
def __init__(self, messages: List[Dict[str, Any]]):
"""Initialize append frame.
Args:
messages: List of messages to append
Example:
frame = LLMMessagesAppendFrame(messages=[
{"role": "user", "content": "Hello"}
])
"""
super().__init__()
self.messages = messages
{ .api }
from pipecat.frames.frames import LLMMessagesUpdateFrame
class LLMMessagesUpdateFrame(DataFrame):
"""Update context messages.
Replaces the entire message history in the context.
Attributes:
messages (List[Dict[str, Any]]): New message list
"""
def __init__(self, messages: List[Dict[str, Any]]):
"""Initialize update frame.
Args:
messages: Complete new message list
Example:
frame = LLMMessagesUpdateFrame(messages=[
{"role": "system", "content": "You are helpful."},
{"role": "user", "content": "Hi"}
])
"""
super().__init__()
self.messages = messages
{ .api }
from pipecat.frames.frames import LLMSetToolsFrame
class LLMSetToolsFrame(DataFrame):
"""Set available tools/functions.
Updates the list of tools/functions available to the LLM.
Attributes:
tools (List[Dict[str, Any]]): Tool/function definitions
"""
def __init__(self, tools: List[Dict[str, Any]]):
"""Initialize set tools frame.
Args:
tools: List of tool definitions (OpenAI format)
Example:
frame = LLMSetToolsFrame(tools=[
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"}
}
}
}
}
])
"""
super().__init__()
self.tools = tools
{ .api }
from pipecat.frames.frames import LLMSetToolChoiceFrame
class LLMSetToolChoiceFrame(DataFrame):
"""Set tool choice strategy.
Controls how the LLM should use tools.
Attributes:
tool_choice (str | Dict[str, Any]): Tool choice strategy
"""
def __init__(self, tool_choice: str | Dict[str, Any]):
"""Initialize tool choice frame.
Args:
tool_choice: "auto", "required", "none", or specific tool dict
Example:
# Auto tool use
frame = LLMSetToolChoiceFrame(tool_choice="auto")
# Force specific tool
frame = LLMSetToolChoiceFrame(tool_choice={
"type": "function",
"function": {"name": "get_weather"}
})
# Require tool use
frame = LLMSetToolChoiceFrame(tool_choice="required")
"""
super().__init__()
self.tool_choice = tool_choice
{ .api }
from pipecat.frames.frames import LLMRunFrame
class LLMRunFrame(DataFrame):
"""Trigger LLM processing with current context.
Signals the LLM service to process the current context and generate a response.
This frame instructs the LLM to start inference with whatever context is currently
available.
Example:
# Set up context
await task.queue_frame(LLMMessagesAppendFrame(messages=[
{"role": "user", "content": "What is the weather?"}
]))
# Trigger LLM inference
await task.queue_frame(LLMRunFrame())
# LLM will emit:
# -> LLMFullResponseStartFrame
# -> LLMTextFrame chunks
# -> LLMFullResponseEndFrame
"""
pass
{ .api }
from pipecat.frames.frames import LLMFullResponseStartFrame
class LLMFullResponseStartFrame(ControlFrame):
"""LLM response start.
Signals the beginning of a complete LLM response, which will be followed by one or
more TextFrames and a final LLMFullResponseEndFrame.
Attributes:
skip_tts (Optional[bool]): Whether this response should skip TTS synthesis (set after init)
Example:
# LLM starts responding
await self.push_frame(LLMFullResponseStartFrame())
await self.push_frame(LLMTextFrame(text="Hello, "))
await self.push_frame(LLMTextFrame(text="how can I help?"))
await self.push_frame(LLMFullResponseEndFrame())
# Skip TTS for this response
start_frame = LLMFullResponseStartFrame()
start_frame.skip_tts = True
await self.push_frame(start_frame)
"""
skip_tts: Optional[bool] = None
{ .api }
from pipecat.frames.frames import LLMFullResponseEndFrame
class LLMFullResponseEndFrame(ControlFrame):
"""LLM response end.
Signals the end of a complete LLM response, marking the completion of the LLM's
streaming output.
Attributes:
skip_tts (Optional[bool]): Whether this response should skip TTS synthesis (set after init)
Example:
# LLM finished responding
await self.push_frame(LLMFullResponseEndFrame())
# Skip TTS for this response
end_frame = LLMFullResponseEndFrame()
end_frame.skip_tts = True
await self.push_frame(end_frame)
"""
skip_tts: Optional[bool] = None
{ .api }
from pipecat.frames.frames import VisionFullResponseStartFrame
class VisionFullResponseStartFrame(LLMFullResponseStartFrame):
"""Vision response start.
Signals the beginning of a vision/multimodal response.
"""
pass
{ .api }
from pipecat.frames.frames import VisionFullResponseEndFrame
class VisionFullResponseEndFrame(LLMFullResponseEndFrame):
"""Vision response end.
Signals the end of a vision/multimodal response.
"""
pass
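
Because the vision frames subclass their LLM counterparts, isinstance checks must test the vision type first. A small illustrative processor (a sketch, not part of pipecat) that tells the two response types apart:

from loguru import logger
from pipecat.frames.frames import (
    LLMFullResponseStartFrame,
    VisionFullResponseStartFrame,
)
from pipecat.processors.frame_processor import FrameProcessor

class ResponseTypeLogger(FrameProcessor):
    """Log whether a response is a vision/multimodal or plain LLM response."""
    async def process_frame(self, frame, direction):
        await super().process_frame(frame, direction)
        # Check the subclass first: every VisionFullResponseStartFrame is also
        # an LLMFullResponseStartFrame, so the order of checks matters.
        if isinstance(frame, VisionFullResponseStartFrame):
            logger.debug("Vision/multimodal response starting")
        elif isinstance(frame, LLMFullResponseStartFrame):
            logger.debug("LLM response starting")
        await self.push_frame(frame, direction)

{ .api }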
from pipecat.frames.frames import LLMEnablePromptCachingFrame
class LLMEnablePromptCachingFrame(DataFrame):
"""Enable or disable prompt caching in LLMs.
Controls prompt caching for supported LLM providers (e.g., Anthropic prompt caching).
Prompt caching can reduce costs and latency by caching parts of the prompt that
don't change between requests.
Attributes:
enable (bool): Whether to enable prompt caching
Example:
# Enable caching for Anthropic
frame = LLMEnablePromptCachingFrame(enable=True)
await task.queue_frame(frame)
# Disable caching
frame = LLMEnablePromptCachingFrame(enable=False)
await task.queue_frame(frame)
"""
enable: bool
{ .api }
from pipecat.frames.frames import LLMConfigureOutputFrame
class LLMConfigureOutputFrame(DataFrame):
"""Configure LLM output behavior.
Configures how the LLM produces output. For example, it can tell the LLM to
generate tokens that should be added to the context but not spoken by the TTS
service (if one is present in the pipeline).
Attributes:
skip_tts (bool): Whether LLM tokens should skip the TTS service (if any)
Example:
# Make LLM output skip TTS (internal thoughts, etc.)
frame = LLMConfigureOutputFrame(skip_tts=True)
await task.queue_frame(frame)
# Resume normal TTS output
frame = LLMConfigureOutputFrame(skip_tts=False)
await task.queue_frame(frame)
"""
skip_tts: bool
{ .api }
from pipecat.frames.frames import LLMUpdateSettingsFrame
class LLMUpdateSettingsFrame(ControlFrame):
"""Update LLM settings.
Updates LLM service settings at runtime.
Attributes:
settings (Dict[str, Any]): New settings
"""
def __init__(self, settings: Dict[str, Any]):
"""Initialize settings update frame.
Args:
settings: Settings to update
Example:
frame = LLMUpdateSettingsFrame(settings={
"temperature": 0.9,
"max_tokens": 1000
})
"""
super().__init__()
self.settings = settings

LLM thoughts represent internal reasoning or chain-of-thought processing that occurs before or during response generation.
{ .api }
from pipecat.frames.frames import LLMThoughtStartFrame
class LLMThoughtStartFrame(ControlFrame):
"""Start of LLM thought.
Marks the beginning of an LLM thought sequence (chain-of-thought reasoning).
Used when LLMs expose their internal reasoning process.
Attributes:
append_to_context (bool): Whether the thought should be appended to the LLM context.
If it is appended, the `llm` field is required, since it will be appended as
an `LLMSpecificMessage`.
llm (Optional[str]): Optional identifier of the LLM provider for LLM-specific handling.
Only required if `append_to_context` is True, as the thought is appended to context
as an `LLMSpecificMessage`.
Example:
# Start thought without adding to context
await self.push_frame(LLMThoughtStartFrame())
# Start thought and add to context
await self.push_frame(LLMThoughtStartFrame(append_to_context=True, llm="anthropic"))
await self.push_frame(LLMThoughtTextFrame(text="Let me think about this..."))
await self.push_frame(LLMThoughtEndFrame())
"""
append_to_context: bool = False
llm: Optional[str] = None
{ .api }
from pipecat.frames.frames import LLMThoughtTextFrame
class LLMThoughtTextFrame(DataFrame):
"""Text content of an LLM thought.
Contains the text (or text chunk) of an LLM thought. Note that despite containing
text, this is a DataFrame and not a TextFrame, to avoid typical text processing
such as TTS synthesis.
Attributes:
text (str): The text (or text chunk) of the thought
includes_inter_frame_spaces (bool): Whether text includes necessary spaces (set after init)
Example:
# Stream thought as text chunks
await self.push_frame(LLMThoughtStartFrame())
await self.push_frame(LLMThoughtTextFrame(text="First, I need to consider "))
await self.push_frame(LLMThoughtTextFrame(text="the user's previous context..."))
await self.push_frame(LLMThoughtEndFrame())
"""
text: str
{ .api }
from pipecat.frames.frames import LLMThoughtEndFrame
class LLMThoughtEndFrame(ControlFrame):
"""End of LLM thought.
Marks the end of an LLM thought sequence. May include an optional signature
for providers that include signatures in their thought output.
Attributes:
signature (Any): Optional signature associated with the thought.
This is used by Anthropic, which includes a signature at the end of
each thought.
Example:
# Simple thought end
await self.push_frame(LLMThoughtEndFrame())
# With signature (Anthropic)
await self.push_frame(LLMThoughtEndFrame(signature=thought_signature))
"""
signature: Any = None
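
The thought frames arrive as a start/text/end sequence. A small consumer sketch (illustrative, not part of pipecat) that accumulates the streamed chunks and logs the complete thought once it ends:

from loguru import logger
from pipecat.frames.frames import (
    LLMThoughtEndFrame,
    LLMThoughtStartFrame,
    LLMThoughtTextFrame,
)
from pipecat.processors.frame_processor import FrameProcessor

class ThoughtLogger(FrameProcessor):
    """Accumulate streamed thought chunks and log the complete thought."""
    def __init__(self):
        super().__init__()
        self._chunks = []

    async def process_frame(self, frame, direction):
        await super().process_frame(frame, direction)
        if isinstance(frame, LLMThoughtStartFrame):
            self._chunks = []
        elif isinstance(frame, LLMThoughtTextFrame):
            self._chunks.append(frame.text)
        elif isinstance(frame, LLMThoughtEndFrame):
            logger.debug(f"LLM thought: {''.join(self._chunks)}")
        await self.push_frame(frame, direction)

{ .api }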
from pipecat.frames.frames import FunctionCallFromLLM
from typing import Mapping, Any
from dataclasses import dataclass
@dataclass
class FunctionCallFromLLM:
"""Function call data from LLM.
Represents a function call returned by the LLM to be registered for execution.
This is a dataclass, not a frame - it's used within function calling frames
to carry function call information.
Attributes:
function_name (str): The name of the function to call
tool_call_id (str): A unique identifier for the function call
arguments (Mapping[str, Any]): The arguments to pass to the function
context (Any): The LLM context when the function call was made
Example:
from dataclasses import dataclass
# Created by LLM service when function call is detected
call = FunctionCallFromLLM(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "San Francisco"},
context=llm_context
)
"""
function_name: str
tool_call_id: str
arguments: Mapping[str, Any]
context: Any
{ .api }
from pipecat.frames.frames import FunctionCallResultFrame, FunctionCallResultProperties
class FunctionCallResultFrame(DataFrame, UninterruptibleFrame):
"""Function call result frame.
Contains the result of an LLM function call. This is an uninterruptible frame
because once a result is generated we always want to update the context.
Attributes:
function_name (str): Name of the function that was executed
tool_call_id (str): Unique identifier for the function call
arguments (Any): Arguments that were passed to the function
result (Any): The result returned by the function
run_llm (Optional[bool]): Whether to run the LLM after this result
properties (Optional[FunctionCallResultProperties]): Additional properties for result handling
Example:
# Success with result
frame = FunctionCallResultFrame(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "San Francisco"},
result={"temperature": 72, "condition": "sunny"}
)
# With run_llm to automatically trigger LLM after result
frame = FunctionCallResultFrame(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "San Francisco"},
result={"temperature": 72, "condition": "sunny"},
run_llm=True
)
# With properties for advanced control
properties = FunctionCallResultProperties(
run_llm=True,
on_context_updated=my_callback
)
frame = FunctionCallResultFrame(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "San Francisco"},
result={"temperature": 72, "condition": "sunny"},
properties=properties
)
"""
function_name: str
tool_call_id: str
arguments: Any
result: Any
run_llm: Optional[bool] = None
properties: Optional[FunctionCallResultProperties] = None
{ .api }
from pipecat.frames.frames import FunctionCallResultProperties
from typing import Optional, Callable, Awaitable
from dataclasses import dataclass
@dataclass
class FunctionCallResultProperties:
"""Properties for configuring function call result behavior.
Controls how function call results are processed and whether the LLM should be
re-run after receiving the result. This dataclass is used as a parameter in
FunctionCallResultFrame.
Attributes:
run_llm (Optional[bool]): Whether to run the LLM after receiving this result
on_context_updated (Optional[Callable[[], Awaitable[None]]]): Callback to execute when context is updated
Example:
# Basic usage with run_llm
properties = FunctionCallResultProperties(run_llm=True)
# With callback
async def on_updated():
print("Context updated with function result")
properties = FunctionCallResultProperties(
run_llm=True,
on_context_updated=on_updated
)
# Use in FunctionCallResultFrame
frame = FunctionCallResultFrame(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "SF"},
result={"temp": 72},
properties=properties
)
"""
run_llm: Optional[bool] = None
on_context_updated: Optional[Callable[[], Awaitable[None]]] = None
{ .api }
from pipecat.frames.frames import FunctionCallsStartedFrame, FunctionCallFromLLM
from typing import Sequence
class FunctionCallsStartedFrame(SystemFrame):
"""Function calls initiated.
A frame signaling that the execution of one or more function calls is about to start.
Emitted before function execution begins to allow for tracking and coordination.
Attributes:
function_calls (Sequence[FunctionCallFromLLM]): Sequence of function calls that will be executed
Example:
# Single function call
frame = FunctionCallsStartedFrame(
function_calls=[
FunctionCallFromLLM(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "SF"},
context=ctx
)
]
)
# Multiple parallel function calls
frame = FunctionCallsStartedFrame(
function_calls=[
FunctionCallFromLLM(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "SF"},
context=ctx
),
FunctionCallFromLLM(
function_name="get_time",
tool_call_id="call_124",
arguments={},
context=ctx
)
]
)
"""
function_calls: Sequence[FunctionCallFromLLM]
{ .api }
from pipecat.frames.frames import FunctionCallInProgressFrame
class FunctionCallInProgressFrame(ControlFrame, UninterruptibleFrame):
"""Function call currently executing.
Signals that a function call is currently executing. This is an uninterruptible
frame because we always want to update the context.
Attributes:
function_name (str): Name of the function being executed
tool_call_id (str): Unique identifier for this function call
arguments (Any): Arguments passed to the function
cancel_on_interruption (bool): Whether to cancel this call if interrupted
Example:
# Basic function call in progress
frame = FunctionCallInProgressFrame(
function_name="get_weather",
tool_call_id="call_123",
arguments={"city": "San Francisco"}
)
# Allow cancellation on user interruption
frame = FunctionCallInProgressFrame(
function_name="long_running_search",
tool_call_id="call_456",
arguments={"query": "complex search"},
cancel_on_interruption=True
)
"""
function_name: str
tool_call_id: str
arguments: Any
cancel_on_interruption: bool = False
{ .api }
from pipecat.frames.frames import FunctionCallCancelFrame
class FunctionCallCancelFrame(SystemFrame):
"""Cancel function call.
A frame signaling that a function call has been cancelled. Used to request
cancellation of a running function call, typically due to user interruption
or timeout.
Attributes:
function_name (str): Name of the function that was cancelled
tool_call_id (str): Unique identifier for the cancelled function call
Example:
# Cancel a specific function call
frame = FunctionCallCancelFrame(
function_name="long_running_task",
tool_call_id="call_789"
)
await task.queue_frame(frame)
"""
function_name: str
tool_call_id: str
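
The function-call frames above describe a full lifecycle (started, in progress, result, cancel). A small monitoring sketch (illustrative, not part of pipecat) that logs each stage as frames pass through:

from loguru import logger
from pipecat.frames.frames import (
    FunctionCallsStartedFrame,
    FunctionCallInProgressFrame,
    FunctionCallCancelFrame,
    FunctionCallResultFrame,
)
from pipecat.processors.frame_processor import FrameProcessor

class FunctionCallMonitor(FrameProcessor):
    """Log the lifecycle of function calls passing through the pipeline."""
    async def process_frame(self, frame, direction):
        await super().process_frame(frame, direction)
        if isinstance(frame, FunctionCallsStartedFrame):
            names = [c.function_name for c in frame.function_calls]
            logger.info(f"Function calls starting: {names}")
        elif isinstance(frame, FunctionCallInProgressFrame):
            logger.info(f"Executing {frame.function_name} ({frame.tool_call_id})")
        elif isinstance(frame, FunctionCallCancelFrame):
            logger.warning(f"Cancelled {frame.function_name} ({frame.tool_call_id})")
        elif isinstance(frame, FunctionCallResultFrame):
            logger.info(f"{frame.function_name} returned: {frame.result}")
        await self.push_frame(frame, direction)

{ .api }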
from pipecat.frames.frames import LLMContextAssistantTimestampFrame
class LLMContextAssistantTimestampFrame(DataFrame):
"""Timestamp information for assistant messages in LLM context.
Tracks timing information for assistant responses in the universal LLM context.
Use with LLMContext and LLMContextAggregatorPair for recording when assistant
messages were generated.
Attributes:
timestamp (str): Timestamp when the assistant message was created
Example:
# Add timestamp to assistant response
import datetime
timestamp = datetime.datetime.utcnow().isoformat() + "Z"
frame = LLMContextAssistantTimestampFrame(timestamp=timestamp)
await task.queue_frame(frame)
"""
timestamp: str
{ .api }
from pipecat.frames.frames import OpenAILLMContextAssistantTimestampFrame
class OpenAILLMContextAssistantTimestampFrame(DataFrame):
"""Timestamp information for assistant messages in LLM context.
DEPRECATED: Use LLMContextAssistantTimestampFrame with universal LLMContext instead.
.. deprecated:: 0.0.99
`OpenAILLMContextAssistantTimestampFrame` is deprecated and will be removed
in a future version. Use `LLMContextAssistantTimestampFrame` with the universal
`LLMContext` and `LLMContextAggregatorPair` instead. See `OpenAILLMContext`
docstring for migration guide.
Tracks timing information for OpenAI assistant responses. This was used with
the legacy OpenAI-specific context system.
Attributes:
timestamp (str): Timestamp when the assistant message was created
Example:
# DEPRECATED - Use LLMContextAssistantTimestampFrame instead
import datetime
timestamp = datetime.datetime.utcnow().isoformat() + "Z"
frame = OpenAILLMContextAssistantTimestampFrame(timestamp=timestamp)
"""
timestamp: str
{ .api }
from pipecat.frames.frames import LLMMessagesFrame
class LLMMessagesFrame(DataFrame):
"""DEPRECATED: LLM messages for chat completion.
.. deprecated:: 0.0.79
This class is deprecated and will be removed in a future version.
Instead, use either:
- `LLMMessagesUpdateFrame` with `run_llm=True`
- `OpenAILLMContextFrame` with desired messages in a new context
A frame containing a list of LLM messages. Used to signal that an LLM service
should run a chat completion and emit an LLMFullResponseStartFrame, TextFrames
and an LLMFullResponseEndFrame. Note that the `messages` property in this class
is mutable, and will be updated by various aggregators.
Attributes:
messages (List[dict]): List of message dictionaries in LLM format
Example:
# DEPRECATED - Use LLMMessagesUpdateFrame instead
frame = LLMMessagesFrame(messages=[
{"role": "system", "content": "You are helpful."},
{"role": "user", "content": "Hello"}
])
"""
messages: List[dict]
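
A minimal migration sketch following the deprecation note above, assuming a running PipelineTask named task: replace the single LLMMessagesFrame with an LLMMessagesUpdateFrame carrying the messages, followed by an LLMRunFrame to trigger inference.

from pipecat.frames.frames import LLMMessagesUpdateFrame, LLMRunFrame

# Before (deprecated):
# await task.queue_frame(LLMMessagesFrame(messages=messages))

# After: replace the context messages, then trigger inference
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hello"},
]
await task.queue_frame(LLMMessagesUpdateFrame(messages=messages))
await task.queue_frame(LLMRunFrame())

{ .api }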
from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
class OpenAILLMContext:
"""DEPRECATED: OpenAI-specific context.
.. deprecated:: 0.0.90
Use universal LLMContext instead.
OpenAI-specific context implementation. Replaced by
provider-agnostic LLMContext.
"""
pass
{ .api }
from pipecat.processors.aggregators.llm_context import LLMContext
from pipecat.frames.frames import LLMContextFrame
# Create context with system message
context = LLMContext(
messages=[
{"role": "system", "content": "You are a helpful assistant."}
],
settings={"temperature": 0.7}
)
# Add tools
context.tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get current weather",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}
}
}
]
# Create frame
frame = LLMContextFrame(context=context)
{ .api }
from pipecat.frames.frames import LLMMessagesAppendFrame
# Append user message
frame = LLMMessagesAppendFrame(messages=[
{"role": "user", "content": "What's the weather in NYC?"}
])
await task.queue_frame(frame)
{ .api }
from pipecat.frames.frames import LLMRunFrame
# After context is set up, trigger LLM
await task.queue_frame(LLMRunFrame())
# LLM will process context and stream response:
# -> LLMFullResponseStartFrame
# -> LLMTextFrame("The weather...")
# -> LLMFullResponseEndFrame
{ .api }
from pipecat.services.llm_service import LLMService
from pipecat.frames.frames import FunctionCallInProgressFrame, FunctionCallResultFrame

class MyLLM(LLMService):
    async def process_frame(self, frame, direction):
        await super().process_frame(frame, direction)
        if isinstance(frame, FunctionCallInProgressFrame):
            # Execute the requested function
            # (_execute_function is a helper defined elsewhere in this service)
            try:
                result = await self._execute_function(
                    frame.function_name,
                    frame.arguments
                )
                # Return the result so it gets added to the context
                result_frame = FunctionCallResultFrame(
                    function_name=frame.function_name,
                    tool_call_id=frame.tool_call_id,
                    arguments=frame.arguments,
                    result=result
                )
                await self.push_frame(result_frame, direction)
            except Exception as e:
                # Report the failure as the result
                error_frame = FunctionCallResultFrame(
                    function_name=frame.function_name,
                    tool_call_id=frame.tool_call_id,
                    arguments=frame.arguments,
                    result={"error": str(e)}
                )
                await self.push_frame(error_frame, direction)
        await self.push_frame(frame, direction)

{ .api }
from pipecat.frames.frames import LLMUpdateSettingsFrame
# Adjust temperature during conversation
frame = LLMUpdateSettingsFrame(settings={
"temperature": 0.2 # More deterministic
})
await task.queue_frame(frame)
# Later, make it more creative
frame = LLMUpdateSettingsFrame(settings={
"temperature": 1.0
})
await task.queue_frame(frame)
{ .api }
from pipecat.frames.frames import LLMSetToolChoiceFrame
# Force tool use
frame = LLMSetToolChoiceFrame(tool_choice="required")
await task.queue_frame(frame)
# Force specific tool
frame = LLMSetToolChoiceFrame(tool_choice={
"type": "function",
"function": {"name": "get_weather"}
})
await task.queue_frame(frame)
# Disable tools
frame = LLMSetToolChoiceFrame(tool_choice="none")
await task.queue_frame(frame)
{ .api }
from pipecat.frames.frames import LLMContextFrame
from pipecat.processors.frame_processor import FrameProcessor

class ContextManager(FrameProcessor):
    """Manage LLM context size by trimming old messages."""
    def __init__(self, max_messages: int = 100):
        super().__init__()
        self._max_messages = max_messages

    async def process_frame(self, frame, direction):
        await super().process_frame(frame, direction)
        if isinstance(frame, LLMContextFrame):
            messages = frame.context.messages
            if len(messages) > self._max_messages:
                # Keep system messages plus the most recent non-system messages
                system_msgs = [m for m in messages if m["role"] == "system"]
                recent_msgs = [m for m in messages[-self._max_messages:] if m["role"] != "system"]
                frame.context.messages = system_msgs + recent_msgs
        await self.push_frame(frame, direction)

{ .api }
from loguru import logger
from pipecat.frames.frames import FunctionCallResultFrame
from pipecat.processors.frame_processor import FrameProcessor

class FunctionCallErrorHandler(FrameProcessor):
    """Handle function call errors gracefully."""
    async def process_frame(self, frame, direction):
        await super().process_frame(frame, direction)
        if isinstance(frame, FunctionCallResultFrame):
            # Treat an {"error": ...} result as a failed call
            if isinstance(frame.result, dict) and "error" in frame.result:
                logger.error(f"Function {frame.function_name} failed: {frame.result['error']}")
                # Add helpful context for the LLM's next turn
                frame.result = {
                    "error": frame.result["error"],
                    "suggestion": "Try rephrasing your request",
                }
        await self.push_frame(frame, direction)