`tessl install tessl/pypi-pipecat-ai@0.0.0`

An open source framework for building real-time voice and multimodal conversational AI agents, with support for speech-to-text, text-to-speech, LLMs, and multiple transport protocols.
All frames inherit from the `Frame` base class and fall into three main categories: `SystemFrame`, `DataFrame`, and `ControlFrame`.
See: Audio Frames Documentation
```python { .api }
from pipecat.frames.frames import (
    AudioRawFrame,              # Base audio with PCM data
    InputAudioRawFrame,         # Audio from transport (SystemFrame)
    OutputAudioRawFrame,        # Audio to transport (DataFrame)
    TTSAudioRawFrame,           # TTS-generated (DataFrame)
    UserAudioRawFrame,          # User audio (SystemFrame)
    SpeechOutputAudioRawFrame,  # Speech output (DataFrame)
)
```
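For orientation, a minimal sketch of wrapping raw PCM bytes in an output audio frame. The `audio`, `sample_rate`, and `num_channels` field names are assumed from the frame definitions and may differ between releases; the 16 kHz mono buffer is made up for illustration.

```python
from pipecat.frames.frames import OutputAudioRawFrame

# Hypothetical buffer: 10 ms of silence as 16-bit mono PCM at 16 kHz.
pcm_bytes = b"\x00\x00" * 160

# Assumption: audio frames expose `audio`, `sample_rate`, and `num_channels`.
frame = OutputAudioRawFrame(
    audio=pcm_bytes,
    sample_rate=16000,
    num_channels=1,
)
print(frame.sample_rate, len(frame.audio))
```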
See: Text Frames Documentation

```python { .api }
from pipecat.frames.frames import (
    TextFrame,                  # Generic text (DataFrame)
    LLMTextFrame,               # LLM-generated text (DataFrame)
    TranscriptionFrame,         # STT transcription final (DataFrame)
    InterimTranscriptionFrame,  # STT transcription interim (SystemFrame)
    TTSTextFrame,               # Text for TTS (DataFrame)
    VisionTextFrame,            # Vision output (DataFrame)
)
```
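A short sketch of constructing text frames by hand, for example in tests. `TextFrame` is assumed to take a single `text` value; the `user_id` and `timestamp` values passed to `TranscriptionFrame` are illustrative.

```python
from pipecat.frames.frames import TextFrame, TranscriptionFrame

# Assumption: TextFrame carries only `text`; TranscriptionFrame also
# records which user spoke and when.
greeting = TextFrame(text="Hello from the bot.")
transcript = TranscriptionFrame(
    text="hi there",
    user_id="user-1",                  # hypothetical user identifier
    timestamp="2024-01-01T00:00:00Z",  # hypothetical ISO-8601 timestamp
)
print(greeting.text, "|", transcript.text, transcript.user_id)
```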
See: LLM Context Frames Documentation

```python { .api }
from pipecat.frames.frames import (
    LLMContextFrame,            # Universal context (DataFrame)
    LLMMessagesAppendFrame,     # Append messages (DataFrame)
    LLMMessagesUpdateFrame,     # Update messages (DataFrame)
    LLMSetToolsFrame,           # Update tools (DataFrame)
    LLMRunFrame,                # Trigger inference (DataFrame)
    LLMFullResponseStartFrame,  # Response begins (ControlFrame)
    LLMFullResponseEndFrame,    # Response complete (ControlFrame)
)
```
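A hedged sketch of driving inference from application code: append a user message to the shared context, then queue an `LLMRunFrame` to trigger a response. It assumes an existing `PipelineTask` named `task` and that `queue_frames` accepts a list of frames.

```python
from pipecat.frames.frames import LLMMessagesAppendFrame, LLMRunFrame

async def ask_followup(task):
    # Assumption: `task` is a pipecat PipelineTask created elsewhere.
    await task.queue_frames([
        LLMMessagesAppendFrame(messages=[
            {"role": "user", "content": "Summarize the last answer."},
        ]),
        LLMRunFrame(),  # trigger an inference pass over the updated context
    ])
```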
See: Control Frames Documentation

```python { .api }
from pipecat.frames.frames import (
    StartFrame,       # Pipeline start (SystemFrame)
    EndFrame,         # Pipeline end (ControlFrame)
    CancelFrame,      # Cancel operation (SystemFrame)
    StopFrame,        # Stop signal (ControlFrame)
    ErrorFrame,       # Recoverable error (SystemFrame)
    FatalErrorFrame,  # Unrecoverable error (SystemFrame)
)
```
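A common shutdown pattern, sketched under the assumption of an existing `PipelineTask` named `task`: queue an `EndFrame`, which, as a `ControlFrame`, travels in order with the data flow and lets processors finish in-flight work before the pipeline stops.

```python
from pipecat.frames.frames import EndFrame

async def shutdown(task):
    # Assumption: `task` is a pipecat PipelineTask created elsewhere.
    # EndFrame flows downstream like data, so queued work is flushed
    # before each processor shuts down.
    await task.queue_frame(EndFrame())
```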
See: Interaction Frames Documentation

```python { .api }
from pipecat.frames.frames import (
    UserStartedSpeakingFrame,     # User began speaking (SystemFrame)
    UserStoppedSpeakingFrame,     # User stopped speaking (SystemFrame)
    UserSpeakingFrame,            # User is speaking (SystemFrame)
    BotStartedSpeakingFrame,      # Bot began speaking (ControlFrame)
    BotStoppedSpeakingFrame,      # Bot stopped speaking (ControlFrame)
    BotSpeakingFrame,             # Bot is speaking (ControlFrame)
    VADUserStartedSpeakingFrame,  # VAD detected start (SystemFrame)
    VADUserStoppedSpeakingFrame,  # VAD detected stop (SystemFrame)
)
```
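Interaction frames are mostly signals, so their type is usually the whole message. A hypothetical helper that folds them into a simple speaking-state dictionary:

```python
from pipecat.frames.frames import (
    BotStartedSpeakingFrame,
    BotStoppedSpeakingFrame,
    UserStartedSpeakingFrame,
    UserStoppedSpeakingFrame,
)

def update_speaking_state(state: dict, frame) -> dict:
    # Hypothetical helper: the frame type alone tells us who is talking.
    if isinstance(frame, UserStartedSpeakingFrame):
        state["user_speaking"] = True
    elif isinstance(frame, UserStoppedSpeakingFrame):
        state["user_speaking"] = False
    elif isinstance(frame, BotStartedSpeakingFrame):
        state["bot_speaking"] = True
    elif isinstance(frame, BotStoppedSpeakingFrame):
        state["bot_speaking"] = False
    return state
```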
See: Image/Video Frames Documentation

```python { .api }
from pipecat.frames.frames import (
    ImageRawFrame,          # Raw image data (DataFrame)
    OutputImageRawFrame,    # Image to transport (DataFrame)
    VideoFrame,             # Video frame (DataFrame)
    UserImageRawFrame,      # User image (SystemFrame)
    UserImageRequestFrame,  # Request user image (SystemFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    TaskFrame,              # Base task frame (SystemFrame)
    EndTaskFrame,           # Graceful close (SystemFrame)
    CancelTaskFrame,        # Immediate cancel (SystemFrame)
    StopTaskFrame,          # Stop but keep processors (SystemFrame)
    InterruptionTaskFrame,  # Interrupt bot (SystemFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    TTSSpeakFrame,           # Direct TTS request (DataFrame)
    TTSStartedFrame,         # TTS began (ControlFrame)
    TTSStoppedFrame,         # TTS stopped (ControlFrame)
    TTSUpdateSettingsFrame,  # Update TTS settings (ControlFrame)
)
```
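A minimal sketch of speaking a fixed line without involving the LLM, assuming a `PipelineTask` named `task`: `TTSSpeakFrame` carries the text straight to the TTS service.

```python
from pipecat.frames.frames import TTSSpeakFrame

async def say(task, text: str):
    # Assumption: `task` is a pipecat PipelineTask created elsewhere.
    await task.queue_frame(TTSSpeakFrame(text))
```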
```python { .api }
from pipecat.frames.frames import (
    STTMuteFrame,            # Mute/unmute STT (SystemFrame)
    STTUpdateSettingsFrame,  # Update STT settings (ControlFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    ServiceUpdateSettingsFrame,  # Base settings update (ControlFrame)
    LLMUpdateSettingsFrame,      # LLM settings (ControlFrame)
    ServiceSwitcherFrame,        # Base switcher (ControlFrame)
    ManuallySwitchServiceFrame,  # Manual switch (ControlFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    OutputTransportReadyFrame,          # Transport ready (ControlFrame)
    OutputTransportMessageFrame,        # Output message (DataFrame)
    OutputTransportMessageUrgentFrame,  # Urgent output (SystemFrame)
    InputTransportMessageFrame,         # Input message (SystemFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    DTMFFrame,              # Base DTMF (varies)
    InputDTMFFrame,         # DTMF input (SystemFrame)
    OutputDTMFFrame,        # DTMF output (DataFrame)
    OutputDTMFUrgentFrame,  # Urgent DTMF (SystemFrame)
)

from pipecat.audio.dtmf.types import KeypadEntry
# KeypadEntry: ZERO, ONE, TWO, ..., NINE, STAR, POUND
```
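A sketch of emitting a DTMF tone. The `button` keyword is an assumption about the DTMF frame's field name; check the frame definition in your installed version.

```python
from pipecat.audio.dtmf.types import KeypadEntry
from pipecat.frames.frames import OutputDTMFFrame

# Assumption: DTMF frames identify the pressed key via a `button` field.
pound = OutputDTMFFrame(button=KeypadEntry.POUND)
```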
```python { .api }
from pipecat.frames.frames import (
    FrameProcessorPauseFrame,         # Pause processor (ControlFrame)
    FrameProcessorResumeFrame,        # Resume processor (ControlFrame)
    FrameProcessorPauseUrgentFrame,   # Urgent pause (SystemFrame)
    FrameProcessorResumeUrgentFrame,  # Urgent resume (SystemFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    FilterControlFrame,         # Base filter control (ControlFrame)
    FilterUpdateSettingsFrame,  # Update filter (ControlFrame)
    FilterEnableFrame,          # Enable/disable filter (ControlFrame)
    MixerControlFrame,          # Base mixer control (ControlFrame)
    MixerUpdateSettingsFrame,   # Update mixer (ControlFrame)
    MixerEnableFrame,           # Enable/disable mixer (ControlFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    InterruptionFrame,       # Base interruption (SystemFrame)
    StartInterruptionFrame,  # Start interruption (SystemFrame)
    BotInterruptionFrame,    # Bot interrupted (SystemFrame)
)
```
```python { .api }
from pipecat.frames.frames import (
    FunctionCallFromLLM,        # LLM calls function (DataFrame, UninterruptibleFrame)
    FunctionCallResultFrame,    # Function result (DataFrame, UninterruptibleFrame)
    FunctionCallCancelFrame,    # Cancel function (SystemFrame)
    FunctionCallsStartedFrame,  # Functions started (ControlFrame)
)
```
```python { .api }
from pipecat.frames.frames import MetricsFrame

class MetricsFrame(SystemFrame):
    """Carries performance/usage metrics."""
    data: MetricsData  # TTFBMetricsData, LLMUsageMetricsData, etc.
```
```python { .api }
from pipecat.frames.frames import (
    HeartbeatFrame,            # Keep-alive (ControlFrame)
    VADParamsUpdateFrame,      # Update VAD (ControlFrame)
    SpeechControlParamsFrame,  # Update speech control (SystemFrame)
)
```

All frames inherit these attributes:
```python { .api }
class Frame:
    """Base frame attributes."""
    id: int                               # Unique frame ID
    name: str                             # Human-readable name
    pts: Optional[int]                    # Presentation timestamp (nanoseconds)
    metadata: Dict[str, Any]              # Arbitrary metadata
    transport_source: Optional[str]       # Source transport name
    transport_destination: Optional[str]  # Destination transport name
```
```python { .api }
from pipecat.processors.frame_processor import FrameDirection

class FrameDirection(Enum):
    """Direction of frame flow."""
    DOWNSTREAM = "downstream"  # Input → Output
    UPSTREAM = "upstream"      # Output → Input
```
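Putting the base attributes and directions together, a minimal custom processor sketch: it assumes the standard `FrameProcessor` base class and the `process_frame`/`push_frame` pattern; the `TranscriptLogger` name and its logging behavior are made up for illustration.

```python
from pipecat.frames.frames import Frame, TranscriptionFrame
from pipecat.processors.frame_processor import FrameDirection, FrameProcessor


class TranscriptLogger(FrameProcessor):
    """Hypothetical processor that logs final user transcriptions."""

    async def process_frame(self, frame: Frame, direction: FrameDirection):
        await super().process_frame(frame, direction)  # base-class bookkeeping

        if isinstance(frame, TranscriptionFrame) and direction == FrameDirection.DOWNSTREAM:
            print(f"[{frame.name}] user said: {frame.text}")

        # Always forward the frame, otherwise the pipeline stalls here.
        await self.push_frame(frame, direction)
```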