Semantic Kernel Python SDK - comprehensive AI development framework for building AI agents and multi-agent systems
—
Comprehensive integrations with major AI providers including chat completions, embeddings, text-to-speech, image generation, and real-time communication capabilities. Supports OpenAI, Azure OpenAI, Google AI, Anthropic, Mistral, and many other providers.
Chat completion services using OpenAI's GPT models for conversational AI and text generation.
class OpenAIChatCompletion:
    """Chat completion connector backed by OpenAI GPT models.

    Produces conversational responses either as a complete list of
    messages or as a stream of incremental chunks.
    """

    def __init__(
        self,
        ai_model_id: str,
        api_key: str | None = None,
        org_id: str | None = None,
        service_id: str | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncOpenAI | None = None
    ):
        """Create the chat completion service.

        Args:
            ai_model_id: OpenAI model identifier, e.g. 'gpt-4' or 'gpt-3.5-turbo'.
            api_key: OpenAI API key; may also be supplied via environment variable.
            org_id: OpenAI organization identifier.
            service_id: Unique identifier for this service instance.
            default_headers: Headers attached to every request.
            async_client: Pre-configured AsyncOpenAI client to reuse instead of
                constructing a new one.
        """

    async def get_chat_message_contents(
        self,
        chat_history: ChatHistory,
        settings: OpenAIChatPromptExecutionSettings,
        **kwargs
    ) -> list[ChatMessageContent]:
        """Request a complete (non-streaming) chat response from OpenAI.

        Args:
            chat_history: The conversation so far.
            settings: Execution settings for the request.
            **kwargs: Extra arguments forwarded to the underlying call.

        Returns:
            A list of ChatMessageContent responses.
        """

    async def get_streaming_chat_message_contents(
        self,
        chat_history: ChatHistory,
        settings: OpenAIChatPromptExecutionSettings,
        **kwargs
    ):
        """Request a streaming chat response from OpenAI.

        Args:
            chat_history: The conversation so far.
            settings: Execution settings for the request.
            **kwargs: Extra arguments forwarded to the underlying call.

        Yields:
            StreamingChatMessageContent chunks as responses stream in.
        """
class AzureChatCompletion:
    """
    Azure OpenAI chat completion service.
    """
    def __init__(
        self,
        deployment_name: str,
        endpoint: str | None = None,
        api_key: str | None = None,
        api_version: str | None = None,
        service_id: str | None = None,
        ad_token: str | None = None,
        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncAzureOpenAI | None = None
    ):
        """
        Initialize Azure OpenAI chat completion service.
        Parameters:
        - deployment_name: Azure OpenAI deployment name
        - endpoint: Azure OpenAI endpoint URL
        - api_key: Azure OpenAI API key
        - api_version: API version to use
        - service_id: Unique identifier for this service instance
        - ad_token: Azure AD token for authentication
        - ad_token_provider: Token provider function for Azure AD auth
        - default_headers: Default headers for requests
        - async_client: Pre-configured Azure OpenAI async client
        NOTE(review): presumably only one of api_key / ad_token / ad_token_provider
        is needed to authenticate - confirm against the SDK documentation.
        """
    async def get_chat_message_contents(
        self,
        chat_history: ChatHistory,
        settings: AzureChatPromptExecutionSettings,
        **kwargs
    ) -> list[ChatMessageContent]:
        """
        Get chat message contents from Azure OpenAI.
        Parameters:
        - chat_history: The conversation history
        - settings: Execution settings including Azure-specific options
        - **kwargs: Additional arguments
        Returns:
        List of ChatMessageContent responses
"""
Text embedding services for converting text into vector representations for semantic search and retrieval.
class OpenAITextEmbedding:
    """Embedding connector that turns text into dense vectors via OpenAI."""

    def __init__(
        self,
        ai_model_id: str,
        api_key: str | None = None,
        org_id: str | None = None,
        service_id: str | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncOpenAI | None = None
    ):
        """Create the embedding service.

        Args:
            ai_model_id: Embedding model identifier, e.g. 'text-embedding-ada-002'.
            api_key: OpenAI API key.
            org_id: OpenAI organization identifier.
            service_id: Unique identifier for this service instance.
            default_headers: Headers attached to every request.
            async_client: Pre-configured AsyncOpenAI client to reuse.
        """

    async def generate_embeddings(
        self,
        texts: list[str],
        **kwargs
    ) -> list[list[float]]:
        """Embed each input string.

        Args:
            texts: Text strings to embed.
            **kwargs: Extra arguments forwarded to the underlying call.

        Returns:
            One embedding vector (a list of floats) per input text.
        """
class AzureTextEmbedding:
    """
    Azure OpenAI text embedding service.
    """
    def __init__(
        self,
        deployment_name: str,
        endpoint: str | None = None,
        api_key: str | None = None,
        api_version: str | None = None,
        service_id: str | None = None,
        ad_token: str | None = None,
        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncAzureOpenAI | None = None
    ):
        """
        Initialize Azure OpenAI text embedding service.
        Parameters:
        - deployment_name: Azure OpenAI embedding deployment name
        - endpoint: Azure OpenAI endpoint URL
        - api_key: Azure OpenAI API key
        - api_version: API version to use
        - service_id: Unique identifier for this service instance
        - ad_token: Azure AD token for authentication
        - ad_token_provider: Token provider function for Azure AD auth
        - default_headers: Default headers for requests
        - async_client: Pre-configured Azure OpenAI async client
        NOTE(review): presumably only one of api_key / ad_token / ad_token_provider
        is needed to authenticate - confirm against the SDK documentation.
        """
    async def generate_embeddings(
        self,
        texts: list[str],
        **kwargs
    ) -> list[list[float]]:
        """
        Generate embeddings for the given texts using Azure OpenAI.
        Parameters:
        - texts: List of text strings to embed
        - **kwargs: Additional arguments
        Returns:
        List of embedding vectors (list of floats)
"""
Text-to-speech and speech-to-text capabilities for audio processing.
class OpenAITextToAudio:
    """Speech-synthesis connector using OpenAI text-to-speech models."""

    def __init__(
        self,
        ai_model_id: str = "tts-1",
        api_key: str | None = None,
        org_id: str | None = None,
        service_id: str | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncOpenAI | None = None
    ):
        """Create the text-to-audio service.

        Args:
            ai_model_id: TTS model identifier, e.g. 'tts-1' or 'tts-1-hd'.
            api_key: OpenAI API key.
            org_id: OpenAI organization identifier.
            service_id: Unique identifier for this service instance.
            default_headers: Headers attached to every request.
            async_client: Pre-configured AsyncOpenAI client to reuse.
        """

    async def get_audio_contents(
        self,
        text: str,
        settings: OpenAITextToAudioExecutionSettings,
        **kwargs
    ) -> list[AudioContent]:
        """Synthesize speech for the given text.

        Args:
            text: Text to convert to speech.
            settings: Audio generation settings (voice, format, etc.).
            **kwargs: Extra arguments forwarded to the underlying call.

        Returns:
            A list of AudioContent items holding the generated speech.
        """
class OpenAIAudioToText:
    """
    OpenAI audio-to-text service for speech recognition.
    Transcription is performed via OpenAI's Whisper models (default 'whisper-1').
    """
    def __init__(
        self,
        ai_model_id: str = "whisper-1",
        api_key: str | None = None,
        org_id: str | None = None,
        service_id: str | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncOpenAI | None = None
    ):
        """
        Initialize OpenAI audio-to-text service.
        Parameters:
        - ai_model_id: Whisper model ID (e.g., 'whisper-1')
        - api_key: OpenAI API key
        - org_id: OpenAI organization ID
        - service_id: Unique identifier for this service instance
        - default_headers: Default headers for requests
        - async_client: Pre-configured OpenAI async client
        """
    async def get_text_contents(
        self,
        audio_content: AudioContent,
        settings: OpenAIAudioToTextExecutionSettings,
        **kwargs
    ) -> list[TextContent]:
        """
        Convert audio to text using Whisper.
        Parameters:
        - audio_content: Audio content to transcribe
        - settings: Transcription settings
        - **kwargs: Additional arguments
        Returns:
        List of TextContent with transcribed text
"""
DALL-E image generation services for creating images from text descriptions.
class OpenAITextToImage:
    """Image-generation connector using OpenAI DALL-E models."""

    def __init__(
        self,
        ai_model_id: str = "dall-e-3",
        api_key: str | None = None,
        org_id: str | None = None,
        service_id: str | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncOpenAI | None = None
    ):
        """Create the text-to-image service.

        Args:
            ai_model_id: DALL-E model identifier, e.g. 'dall-e-2' or 'dall-e-3'.
            api_key: OpenAI API key.
            org_id: OpenAI organization identifier.
            service_id: Unique identifier for this service instance.
            default_headers: Headers attached to every request.
            async_client: Pre-configured AsyncOpenAI client to reuse.
        """

    async def get_image_contents(
        self,
        description: str,
        settings: OpenAITextToImageExecutionSettings,
        **kwargs
    ) -> list[ImageContent]:
        """Generate images matching a text description.

        Args:
            description: Text description of the image to generate.
            settings: Image generation settings (size, quality, etc.).
            **kwargs: Extra arguments forwarded to the underlying call.

        Returns:
            A list of ImageContent items holding the generated images.
        """
class AzureTextToImage:
    """
    Azure OpenAI text-to-image service.
    """
    def __init__(
        self,
        deployment_name: str,
        endpoint: str | None = None,
        api_key: str | None = None,
        api_version: str | None = None,
        service_id: str | None = None,
        ad_token: str | None = None,
        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None,
        default_headers: dict[str, str] | None = None,
        async_client: AsyncAzureOpenAI | None = None
    ):
        """
        Initialize Azure OpenAI text-to-image service.
        Parameters:
        - deployment_name: Azure OpenAI DALL-E deployment name
        - endpoint: Azure OpenAI endpoint URL
        - api_key: Azure OpenAI API key
        - api_version: API version to use
        - service_id: Unique identifier for this service instance
        - ad_token: Azure AD token for authentication
        - ad_token_provider: Token provider function for Azure AD auth
        - default_headers: Default headers for requests
        - async_client: Pre-configured Azure OpenAI async client
        NOTE(review): presumably only one of api_key / ad_token / ad_token_provider
        is needed to authenticate - confirm against the SDK documentation.
        """
    async def get_image_contents(
        self,
        description: str,
        settings: OpenAITextToImageExecutionSettings,
        **kwargs
    ) -> list[ImageContent]:
        """
        Generate images from text description using Azure OpenAI.
        Parameters:
        - description: Text description of the image to generate
        - settings: Image generation settings
        - **kwargs: Additional arguments
        Returns:
        List of ImageContent with generated images
"""
WebSocket and WebRTC services for real-time communication with AI models.
class OpenAIRealtimeWebsocket:
    """Real-time, bidirectional communication with OpenAI over WebSocket."""

    def __init__(
        self,
        ai_model_id: str = "gpt-4o-realtime-preview-2024-10-01",
        api_key: str | None = None,
        org_id: str | None = None,
        service_id: str | None = None
    ):
        """Create the real-time WebSocket service.

        Args:
            ai_model_id: Real-time model identifier.
            api_key: OpenAI API key.
            org_id: OpenAI organization identifier.
            service_id: Unique identifier for this service instance.
        """

    async def start_session(
        self,
        settings: OpenAIRealtimeExecutionSettings,
        **kwargs
    ):
        """Open a real-time session.

        Args:
            settings: Real-time execution settings.
            **kwargs: Extra arguments forwarded to the underlying call.

        Returns:
            A session context for real-time communication.
        """
class AzureRealtimeWebsocket:
    """
    Azure OpenAI real-time WebSocket communication service.
    """
    def __init__(
        self,
        deployment_name: str,
        endpoint: str | None = None,
        api_key: str | None = None,
        api_version: str | None = None,
        service_id: str | None = None,
        ad_token: str | None = None,
        ad_token_provider: Callable[[], str | Awaitable[str]] | None = None
    ):
        """
        Initialize Azure OpenAI real-time WebSocket service.
        NOTE(review): presumably only one of api_key / ad_token / ad_token_provider
        is needed to authenticate - confirm against the SDK documentation.
        Parameters:
        - deployment_name: Azure OpenAI real-time deployment name
        - endpoint: Azure OpenAI endpoint URL
        - api_key: Azure OpenAI API key
        - api_version: API version to use
        - service_id: Unique identifier for this service instance
        - ad_token: Azure AD token for authentication
        - ad_token_provider: Token provider function for Azure AD auth
"""
Semantic Kernel supports many other AI providers through dedicated connector modules:
# Anthropic Claude
from semantic_kernel.connectors.ai.anthropic import AnthropicChatCompletion
# Google AI and Vertex AI
from semantic_kernel.connectors.ai.google.google_ai import GoogleAIChatCompletion
from semantic_kernel.connectors.ai.google.vertex_ai import VertexAIChatCompletion
# Hugging Face
from semantic_kernel.connectors.ai.hugging_face import HuggingFaceChatCompletion
# Mistral AI
from semantic_kernel.connectors.ai.mistral_ai import MistralAIChatCompletion
# NVIDIA NIM
from semantic_kernel.connectors.ai.nvidia import NVIDIAChatCompletion
# Ollama (local models)
from semantic_kernel.connectors.ai.ollama import OllamaChatCompletion
# ONNX Runtime
from semantic_kernel.connectors.ai.onnx import ONNXGenAIChatCompletion
# Amazon Bedrock
from semantic_kernel.connectors.ai.bedrock import BedrockChatCompletion
# Azure AI Inference
from semantic_kernel.connectors.ai.azure_ai_inference import AzureAIInferenceChatCompletion
Configuration classes for controlling AI service behavior.
class PromptExecutionSettings:
"""
Base class for AI service execution settings.
"""
def __init__(
self,
service_id: str | None = None,
extension_data: dict[str, Any] | None = None,
**kwargs
):
"""
Initialize execution settings.
Parameters:
- service_id: ID of the service to use for execution
- extension_data: Additional service-specific settings
- **kwargs: Additional settings
"""
class OpenAIChatPromptExecutionSettings(PromptExecutionSettings):
    """
    OpenAI-specific chat execution settings.
    """
    def __init__(
        self,
        service_id: str | None = None,
        max_tokens: int | None = None,
        temperature: float | None = None,
        top_p: float | None = None,
        frequency_penalty: float | None = None,
        presence_penalty: float | None = None,
        # Quoted forward reference: FunctionChoiceBehavior is defined later in
        # this file, so an unquoted annotation would raise NameError at class
        # definition time.
        function_choice_behavior: "FunctionChoiceBehavior | None" = None,
        **kwargs
    ):
        """
        Initialize OpenAI chat execution settings.
        Parameters:
        - service_id: Service ID
        - max_tokens: Maximum tokens to generate
        - temperature: Sampling temperature (0.0 to 2.0)
        - top_p: Nucleus sampling parameter
        - frequency_penalty: Frequency penalty (-2.0 to 2.0)
        - presence_penalty: Presence penalty (-2.0 to 2.0)
        - function_choice_behavior: Function calling behavior
        - **kwargs: Additional OpenAI parameters
        """
class FunctionChoiceBehavior:
"""
Controls how the AI service handles function calling.
"""
@staticmethod
def Auto(auto_invoke: bool = True, filters: dict | None = None) -> FunctionChoiceBehavior:
"""
Automatically choose and invoke functions.
Parameters:
- auto_invoke: Whether to automatically invoke chosen functions
- filters: Function name filters
Returns:
FunctionChoiceBehavior configured for automatic function calling
"""
@staticmethod
def Required(auto_invoke: bool = True, filters: dict | None = None) -> FunctionChoiceBehavior:
"""
Require function calling.
Parameters:
- auto_invoke: Whether to automatically invoke chosen functions
- filters: Function name filters
Returns:
FunctionChoiceBehavior configured to require function calling
"""
@staticmethod
def NoneInvoke() -> FunctionChoiceBehavior:
"""
Disable function calling.
Returns:
FunctionChoiceBehavior configured to disable function calling
"""
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.contents import ChatHistory
# Initialize service and kernel
chat_service = OpenAIChatCompletion(
ai_model_id="gpt-4",
api_key="your-api-key"
)
kernel = Kernel()
kernel.add_service(chat_service)
# Create chat history and get response
chat_history = ChatHistory()
chat_history.add_user_message("What is the capital of France?")
# Get response using kernel
result = await kernel.invoke_prompt(
"{{$chat_history}}",
chat_history=chat_history
)
print(result.value)
from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAITextEmbedding
from semantic_kernel.services import AIServiceSelector
# Create multiple services
chat_service = OpenAIChatCompletion(
service_id="gpt-4-chat",
ai_model_id="gpt-4",
api_key="your-api-key"
)
embedding_service = OpenAITextEmbedding(
service_id="text-embedding",
ai_model_id="text-embedding-ada-002",
api_key="your-api-key"
)
# Initialize kernel with services
kernel = Kernel(services=[chat_service, embedding_service])
# Services are automatically selected based on capability needed
chat_result = await kernel.invoke_prompt("Hello, world!") # Uses chat service
embeddings = await embedding_service.generate_embeddings(["Hello", "World"])  # Direct service usage
Install with Tessl CLI
npx tessl i tessl/pypi-semantic-kernel