Semantic Kernel Python SDK - comprehensive AI development framework for building AI agents and multi-agent systems
—
Structured data types for representing messages, media content, function calls, conversation history, and real-time events. Provides a comprehensive type system for all forms of content that flow through Semantic Kernel.
Core conversation management with support for multi-turn dialogs, role-based messaging, and conversation persistence.
class ChatHistory:
    """Container for managing conversation history and message sequences."""

    def __init__(self, messages: list[ChatMessageContent] | None = None):
        """
        Initialize ChatHistory with optional initial messages.

        Parameters:
        - messages: Initial list of ChatMessageContent messages
        """

    def add_message(self, message: ChatMessageContent) -> None:
        """
        Add a message to the chat history.

        Parameters:
        - message: ChatMessageContent to add to the history
        """

    def add_user_message(self, content: str) -> None:
        """
        Add a user message to the chat history.

        Parameters:
        - content: The user's message content
        """

    def add_assistant_message(self, content: str) -> None:
        """
        Add an assistant message to the chat history.

        Parameters:
        - content: The assistant's message content
        """

    def add_system_message(self, content: str) -> None:
        """
        Add a system message to the chat history.

        Parameters:
        - content: The system message content
        """

    def remove_message(self, message: ChatMessageContent) -> bool:
        """
        Remove a message from the chat history.

        Parameters:
        - message: The message to remove

        Returns:
        True if message was found and removed, False otherwise
        """

    def clear(self) -> None:
        """Clear all messages from the chat history."""

    @property
    def messages(self) -> list[ChatMessageContent]:
        """Get all messages in the chat history."""

    def __len__(self) -> int:
        """Get the number of messages in the history."""

    def __iter__(self):
        """Iterate over messages in the history."""
class ChatMessageContent:
    """Represents a single message in a conversation."""

    def __init__(
        self,
        role: AuthorRole,
        content: str | None = None,
        name: str | None = None,
        items: list[KernelContent] | None = None,
        encoding: str | None = None,
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None,
        inner_content: Any | None = None
    ):
        """
        Initialize a chat message.

        Parameters:
        - role: The role of the message author (user, assistant, system, etc.)
        - content: Text content of the message
        - name: Optional name/identifier for the message author
        - items: List of content items (text, images, function calls, etc.)
        - encoding: Text encoding format
        - ai_model_id: ID of the AI model that generated this message
        - metadata: Additional metadata for the message
        - inner_content: Raw content from the underlying AI service
        """

    @property
    def role(self) -> AuthorRole:
        """Get the message author role."""

    @property
    def content(self) -> str:
        """Get the text content of the message."""

    @property
    def name(self) -> str | None:
        """Get the message author name."""

    @property
    def items(self) -> list[KernelContent]:
        """Get the content items in this message."""

    def __str__(self) -> str:
        """String representation of the message."""
class StreamingChatMessageContent:
    """Represents a streaming chunk of a chat message."""

    def __init__(
        self,
        role: AuthorRole | None = None,
        content: str | None = None,
        choice_index: int | None = None,
        name: str | None = None,
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None,
        inner_content: Any | None = None
    ):
        """
        Initialize a streaming chat message chunk.

        Parameters:
        - role: The role of the message author
        - content: Partial text content for this chunk
        - choice_index: Index of this choice in multi-choice responses
        - name: Optional name/identifier for the message author
        - ai_model_id: ID of the AI model generating this content
        - metadata: Additional metadata for the chunk
        - inner_content: Raw content from the underlying AI service
        """

    @property
    def role(self) -> AuthorRole | None:
        """Get the message author role."""

    @property
    def content(self) -> str | None:
        """Get the partial text content."""

    @property
    def choice_index(self) -> int | None:
        """Get the choice index."""


# Individual content items that can be included in messages.
class TextContent:
"""
Represents text content within a message.
"""
def __init__(
self,
text: str,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None,
encoding: str | None = None
):
"""
Initialize text content.
Parameters:
- text: The text content
- ai_model_id: ID of the AI model that generated this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
- encoding: Text encoding format
"""
@property
def text(self) -> str:
"""Get the text content."""
def __str__(self) -> str:
"""String representation of the text content."""
class StreamingTextContent:
"""
Represents streaming text content.
"""
def __init__(
self,
choice_index: int,
text: str | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize streaming text content.
Parameters:
- choice_index: Index of this choice in multi-choice responses
- text: Partial text content for this chunk
- ai_model_id: ID of the AI model generating this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def text(self) -> str | None:
"""Get the partial text content."""
@property
def choice_index(self) -> int:
"""Get the choice index."""
class ImageContent:
"""
Represents image content within a message.
"""
def __init__(
self,
uri: str | None = None,
data: bytes | None = None,
mime_type: str | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize image content.
Parameters:
- uri: URI/URL to the image
- data: Raw image data bytes
- mime_type: MIME type of the image (e.g., 'image/jpeg')
- ai_model_id: ID of the AI model that generated this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def uri(self) -> str | None:
"""Get the image URI."""
@property
def data(self) -> bytes | None:
"""Get the raw image data."""
@property
def mime_type(self) -> str | None:
"""Get the image MIME type."""
class AudioContent:
"""
Represents audio content within a message.
"""
def __init__(
self,
uri: str | None = None,
data: bytes | None = None,
mime_type: str | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize audio content.
Parameters:
- uri: URI/URL to the audio file
- data: Raw audio data bytes
- mime_type: MIME type of the audio (e.g., 'audio/wav')
- ai_model_id: ID of the AI model that generated this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def uri(self) -> str | None:
"""Get the audio URI."""
@property
def data(self) -> bytes | None:
"""Get the raw audio data."""
@property
def mime_type(self) -> str | None:
"""Get the audio MIME type."""
class FileReferenceContent:
"""
Represents a file reference within a message.
"""
def __init__(
self,
file_id: str,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize file reference content.
Parameters:
- file_id: Unique identifier for the referenced file
- ai_model_id: ID of the AI model that generated this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def file_id(self) -> str:
"""Get the file ID."""
class StreamingFileReferenceContent:
"""
Represents streaming file reference content.
"""
def __init__(
self,
choice_index: int,
file_id: str | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize streaming file reference content.
Parameters:
- choice_index: Index of this choice in multi-choice responses
- file_id: Unique identifier for the referenced file
- ai_model_id: ID of the AI model generating this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def file_id(self) -> str | None:
"""Get the file ID."""
@property
def choice_index(self) -> int:
"""Get the choice index."""Content types for representing function calls and their results within conversations.
class FunctionCallContent:
"""
Represents a function call within a message.
"""
def __init__(
self,
id: str,
name: str,
arguments: dict[str, Any] | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize function call content.
Parameters:
- id: Unique identifier for this function call
- name: Name of the function to call
- arguments: Arguments to pass to the function
- ai_model_id: ID of the AI model that generated this call
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def id(self) -> str:
"""Get the function call ID."""
@property
def name(self) -> str:
"""Get the function name."""
@property
def arguments(self) -> dict[str, Any]:
"""Get the function arguments."""
class FunctionResultContent:
"""
Represents the result of a function call within a message.
"""
def __init__(
self,
call_id: str,
name: str,
result: Any | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize function result content.
Parameters:
- call_id: ID of the function call this result corresponds to
- name: Name of the function that was called
- result: The result value from the function execution
- ai_model_id: ID of the AI model involved in this call
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def call_id(self) -> str:
"""Get the function call ID."""
@property
def name(self) -> str:
"""Get the function name."""
@property
def result(self) -> Any:
"""Get the function result."""
class AnnotationContent:
"""
Represents annotation content within a message.
"""
def __init__(
self,
file_id: str,
quote: str | None = None,
start_index: int | None = None,
end_index: int | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize annotation content.
Parameters:
- file_id: ID of the file being annotated
- quote: Quoted text from the file
- start_index: Start index of the annotation in the file
- end_index: End index of the annotation in the file
- ai_model_id: ID of the AI model that generated this annotation
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def file_id(self) -> str:
"""Get the file ID."""
@property
def quote(self) -> str | None:
"""Get the quoted text."""
class StreamingAnnotationContent:
"""
Represents streaming annotation content.
"""
def __init__(
self,
choice_index: int,
file_id: str | None = None,
quote: str | None = None,
start_index: int | None = None,
end_index: int | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize streaming annotation content.
Parameters:
- choice_index: Index of this choice in multi-choice responses
- file_id: ID of the file being annotated
- quote: Quoted text from the file
- start_index: Start index of the annotation in the file
- end_index: End index of the annotation in the file
- ai_model_id: ID of the AI model generating this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def choice_index(self) -> int:
"""Get the choice index."""
@property
def file_id(self) -> str | None:
"""Get the file ID."""
class ReasoningContent:
"""
Represents reasoning/thinking content within a message.
"""
def __init__(
self,
content: str,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize reasoning content.
Parameters:
- content: The reasoning or thinking text
- ai_model_id: ID of the AI model that generated this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def content(self) -> str:
"""Get the reasoning content."""
class StreamingReasoningContent:
"""
Represents streaming reasoning/thinking content.
"""
def __init__(
self,
choice_index: int,
content: str | None = None,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None,
inner_content: Any | None = None
):
"""
Initialize streaming reasoning content.
Parameters:
- choice_index: Index of this choice in multi-choice responses
- content: Partial reasoning content for this chunk
- ai_model_id: ID of the AI model generating this content
- metadata: Additional metadata
- inner_content: Raw content from the underlying AI service
"""
@property
def choice_index(self) -> int:
"""Get the choice index."""
@property
def content(self) -> str | None:
"""Get the reasoning content."""Event types for real-time communication scenarios.
class RealtimeEvent:
"""
Base class for real-time events.
"""
def __init__(
self,
event_type: str,
ai_model_id: str | None = None,
metadata: dict[str, Any] | None = None
):
"""
Initialize a real-time event.
Parameters:
- event_type: Type of the real-time event
- ai_model_id: ID of the AI model associated with this event
- metadata: Additional metadata
"""
@property
def event_type(self) -> str:
"""Get the event type."""
class RealtimeTextEvent(RealtimeEvent):
    """Real-time text event."""

    def __init__(
        self,
        text: str,
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None
    ):
        """
        Initialize a real-time text event.

        Parameters:
        - text: The text content
        - ai_model_id: ID of the AI model associated with this event
        - metadata: Additional metadata
        """

    @property
    def text(self) -> str:
        """Get the text content."""
class RealtimeAudioEvent(RealtimeEvent):
    """Real-time audio event."""

    def __init__(
        self,
        audio_data: bytes,
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None
    ):
        """
        Initialize a real-time audio event.

        Parameters:
        - audio_data: Raw audio data
        - ai_model_id: ID of the AI model associated with this event
        - metadata: Additional metadata
        """

    @property
    def audio_data(self) -> bytes:
        """Get the audio data."""
class RealtimeFunctionCallEvent(RealtimeEvent):
    """Real-time function call event."""

    def __init__(
        self,
        call_id: str,
        name: str,
        arguments: dict[str, Any],
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None
    ):
        """
        Initialize a real-time function call event.

        Parameters:
        - call_id: Unique identifier for the function call
        - name: Name of the function to call
        - arguments: Arguments to pass to the function
        - ai_model_id: ID of the AI model associated with this event
        - metadata: Additional metadata
        """

    @property
    def call_id(self) -> str:
        """Get the function call ID."""

    @property
    def name(self) -> str:
        """Get the function name."""

    @property
    def arguments(self) -> dict[str, Any]:
        """Get the function arguments."""
class RealtimeFunctionResultEvent(RealtimeEvent):
    """Real-time function result event."""

    def __init__(
        self,
        call_id: str,
        result: Any,
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None
    ):
        """
        Initialize a real-time function result event.

        Parameters:
        - call_id: ID of the function call this result corresponds to
        - result: The result value from function execution
        - ai_model_id: ID of the AI model associated with this event
        - metadata: Additional metadata
        """

    @property
    def call_id(self) -> str:
        """Get the function call ID."""

    @property
    def result(self) -> Any:
        """Get the function result."""
class RealtimeImageEvent(RealtimeEvent):
    """Real-time image event."""

    def __init__(
        self,
        image_data: bytes,
        mime_type: str = "image/jpeg",
        ai_model_id: str | None = None,
        metadata: dict[str, Any] | None = None
    ):
        """
        Initialize a real-time image event.

        Parameters:
        - image_data: Raw image data
        - mime_type: MIME type of the image
        - ai_model_id: ID of the AI model associated with this event
        - metadata: Additional metadata
        """

    @property
    def image_data(self) -> bytes:
        """Get the image data."""

    @property
    def mime_type(self) -> str:
        """Get the image MIME type."""
class RealtimeEvents:
    """Container for managing real-time events."""

    def __init__(self):
        """Initialize real-time events container."""

    def add_event(self, event: RealtimeEvent) -> None:
        """
        Add an event to the container.

        Parameters:
        - event: The real-time event to add
        """

    def get_events(self, event_type: str | None = None) -> list[RealtimeEvent]:
        """
        Get events from the container.

        Parameters:
        - event_type: Filter by event type (optional)

        Returns:
        List of matching real-time events
        """


# Utility classes and enums for working with content.
class AuthorRole:
    """Enumeration of message author roles."""

    USER: str = "user"
    ASSISTANT: str = "assistant"
    SYSTEM: str = "system"
    TOOL: str = "tool"
class FinishReason:
    """Enumeration of completion finish reasons."""

    STOP: str = "stop"
    LENGTH: str = "length"
    FUNCTION_CALL: str = "function_call"
    CONTENT_FILTER: str = "content_filter"
    TOOL_CALLS: str = "tool_calls"
class ChatHistoryReducer:
    """Base class for reducing chat history size."""

    async def reduce(self, chat_history: ChatHistory) -> bool:
        """
        Reduce the size of chat history.

        Parameters:
        - chat_history: The chat history to reduce

        Returns:
        True if reduction was performed, False otherwise
        """
class ChatHistoryTruncationReducer(ChatHistoryReducer):
    """Reduces chat history by truncating old messages."""

    def __init__(self, target_count: int):
        """
        Initialize truncation reducer.

        Parameters:
        - target_count: Target number of messages to keep
        """

    async def reduce(self, chat_history: ChatHistory) -> bool:
        """
        Reduce chat history by removing oldest messages.

        Parameters:
        - chat_history: The chat history to reduce

        Returns:
        True if messages were removed, False otherwise
        """
class ChatHistorySummarizationReducer(ChatHistoryReducer):
    """Reduces chat history by summarizing old messages."""

    def __init__(
        self,
        target_count: int,
        kernel: Kernel,
        summarization_function: KernelFunction
    ):
        """
        Initialize summarization reducer.

        Parameters:
        - target_count: Target number of messages to keep
        - kernel: Kernel instance for summarization
        - summarization_function: Function to use for summarization
        """

    async def reduce(self, chat_history: ChatHistory) -> bool:
        """
        Reduce chat history by summarizing older messages.

        Parameters:
        - chat_history: The chat history to reduce

        Returns:
        True if summarization was performed, False otherwise
        """


from semantic_kernel.contents import ChatHistory, ChatMessageContent, AuthorRole
# Create and manage chat history
chat_history = ChatHistory()

# Add messages
chat_history.add_user_message("Hello, how are you?")
chat_history.add_assistant_message("I'm doing well, thank you! How can I help you today?")
chat_history.add_user_message("Can you help me with Python programming?")

# Access messages
for message in chat_history:
    print(f"{message.role}: {message.content}")

# Get message count
print(f"Total messages: {len(chat_history)}")

from semantic_kernel.contents import ChatMessageContent, TextContent, ImageContent, AuthorRole
# Create a message with mixed content
message = ChatMessageContent(
    role=AuthorRole.USER,
    items=[
        TextContent("What's in this image?"),
        ImageContent(uri="https://example.com/image.jpg")
    ]
)

# Access different content types
for item in message.items:
    if isinstance(item, TextContent):
        print(f"Text: {item.text}")
    elif isinstance(item, ImageContent):
        print(f"Image URI: {item.uri}")

from semantic_kernel.contents import FunctionCallContent, FunctionResultContent
# Create function call content
function_call = FunctionCallContent(
    id="call_123",
    name="get_weather",
    arguments={"location": "Seattle", "units": "fahrenheit"}
)

# Create function result content
function_result = FunctionResultContent(
    call_id="call_123",
    name="get_weather",
    result="The weather in Seattle is 72°F with partly cloudy skies"
)

# Add to chat history
chat_history.add_message(ChatMessageContent(
    role=AuthorRole.ASSISTANT,
    items=[function_call]
))
chat_history.add_message(ChatMessageContent(
    role=AuthorRole.TOOL,
    items=[function_result]
))

# Install with Tessl CLI:
#   npx tessl i tessl/pypi-semantic-kernel