Semantic Kernel Python SDK — a comprehensive AI development framework for building AI agents and multi-agent systems.

Extensible filter system for intercepting and modifying function invocation, prompt rendering, and auto-function invocation. Enables custom middleware, logging, validation, and transformation logic. This module defines the context objects for the different types of filter operations.
class FunctionInvocationContext:
    """Context passed through function invocation filters.

    Exposes the function, kernel, and arguments of the call being
    intercepted; ``result`` becomes available once the inner pipeline
    has run (post-invocation filters).
    """

    def __init__(
        self,
        function: KernelFunction,
        kernel: Kernel,
        arguments: KernelArguments
    ):
        """Initialize function invocation context.

        Args:
            function: The function being invoked.
            kernel: Kernel instance.
            arguments: Function arguments.
        """

    @property
    def function(self) -> KernelFunction:
        """Get the function being invoked."""

    @property
    def kernel(self) -> Kernel:
        """Get the kernel instance."""

    @property
    def arguments(self) -> KernelArguments:
        """Get the function arguments."""

    @property
    def result(self) -> FunctionResult | None:
        """Get the function result (available in post-invocation filters)."""
class PromptRenderContext:
    """Context passed through prompt rendering filters.

    Exposes the prompt-bearing function, kernel, and rendering
    arguments; ``rendered_prompt`` becomes available once rendering
    has completed (post-render filters).
    """

    def __init__(
        self,
        function: KernelFunction,
        kernel: Kernel,
        arguments: KernelArguments
    ):
        """Initialize prompt render context.

        Args:
            function: The function containing the prompt.
            kernel: Kernel instance.
            arguments: Arguments for prompt rendering.
        """

    @property
    def function(self) -> KernelFunction:
        """Get the function containing the prompt."""

    @property
    def kernel(self) -> Kernel:
        """Get the kernel instance."""

    @property
    def arguments(self) -> KernelArguments:
        """Get the prompt arguments."""

    @property
    def rendered_prompt(self) -> str | None:
        """Get the rendered prompt (available in post-render filters)."""
class AutoFunctionInvocationContext:
    """Context passed through auto function invocation filters.

    Exposes the auto-invoked function, kernel, and arguments, plus the
    chat history driving the invocation when one applies.
    """

    def __init__(
        self,
        function: KernelFunction,
        kernel: Kernel,
        arguments: KernelArguments
    ):
        """Initialize auto function invocation context.

        Args:
            function: The function being auto-invoked.
            kernel: Kernel instance.
            arguments: Function arguments.
        """

    @property
    def function(self) -> KernelFunction:
        """Get the function being auto-invoked."""

    @property
    def kernel(self) -> Kernel:
        """Get the kernel instance."""

    @property
    def arguments(self) -> KernelArguments:
        """Get the function arguments."""

    @property
    def chat_history(self) -> ChatHistory | None:
        """Get the chat history (if applicable)."""
class FilterTypes:
    """Enumeration of filter types.

    Values are plain strings, so they can be used directly when
    registering or looking up filters by type.
    """

    FUNCTION_INVOCATION: str = "function_invocation"
    PROMPT_RENDERING: str = "prompt_rendering"
    AUTO_FUNCTION_INVOCATION: str = "auto_function_invocation"

# The Kernel class includes filter management capabilities through extensions.
# Filter management methods available on Kernel class
class Kernel:
    """Kernel with filter management capabilities.

    Filters are registered per filter type and run as a middleware
    chain around function invocation, prompt rendering, and automatic
    function invocation.
    """

    def add_function_invocation_filter(self, filter_func) -> None:
        """Add a function invocation filter.

        Args:
            filter_func: Filter coroutine taking ``(context, next_filter)``.
        """

    def add_prompt_rendering_filter(self, filter_func) -> None:
        """Add a prompt rendering filter.

        Args:
            filter_func: Filter coroutine taking ``(context, next_filter)``.
        """

    def add_auto_function_invocation_filter(self, filter_func) -> None:
        """Add an auto function invocation filter.

        Args:
            filter_func: Filter coroutine taking ``(context, next_filter)``.
        """

    @property
    def function_invocation_filters(self) -> list:
        """Get all function invocation filters."""

    @property
    def prompt_rendering_filters(self) -> list:
        """Get all prompt rendering filters."""

    @property
    def auto_function_invocation_filters(self) -> list:
        """Get all auto function invocation filters."""

from semantic_kernel import Kernel
from semantic_kernel.filters import FunctionInvocationContext
import logging

# Create logging filter
async def logging_filter(context: FunctionInvocationContext, next_filter):
    """Log function invocations before and after execution."""
    logging.info(f"Invoking function: {context.function.name}")
    logging.info(f"Arguments: {context.arguments}")
    # Call next filter in chain (runs the function and any remaining filters)
    await next_filter(context)
    # result is typed FunctionResult | None, so guard before dereferencing
    if context.result is not None:
        logging.info(f"Function result: {context.result.value}")

# Add filter to kernel
kernel = Kernel()
kernel.add_function_invocation_filter(logging_filter)

from semantic_kernel.filters import PromptRenderContext
async def prompt_validation_filter(context: PromptRenderContext, next_filter):
    """Validate rendered prompts after rendering completes."""
    # Call next filter first so the prompt is actually rendered
    await next_filter(context)
    # Validate rendered prompt length
    if context.rendered_prompt and len(context.rendered_prompt) > 10000:
        raise ValueError("Rendered prompt exceeds maximum length")
    print(f"Validated prompt for function: {context.function.name}")

# Add prompt validation filter
kernel.add_prompt_rendering_filter(prompt_validation_filter)

from semantic_kernel.filters import AutoFunctionInvocationContext
async def function_approval_filter(context: AutoFunctionInvocationContext, next_filter):
    """Require interactive approval before sensitive functions run."""
    sensitive_functions = ["delete_file", "send_email", "make_payment"]
    if context.function.name in sensitive_functions:
        print(f"Function {context.function.name} requires approval.")
        approval = input("Approve? (y/N): ")
        if approval.lower() != 'y':
            print("Function invocation cancelled.")
            # Short-circuit: skipping next_filter means the function never runs
            return
    # Proceed with function invocation
    await next_filter(context)

# Add approval filter for auto function invocation
kernel.add_auto_function_invocation_filter(function_approval_filter)

import traceback
# FunctionResult is needed to build the replacement result below
from semantic_kernel.functions import FunctionResult

async def error_handling_filter(context: FunctionInvocationContext, next_filter):
    """Handle and log errors during function invocation."""
    try:
        await next_filter(context)
    except Exception as e:
        logging.error(f"Error in function {context.function.name}: {str(e)}")
        logging.error(f"Traceback: {traceback.format_exc()}")
        # Swallow the exception and surface it as an error-valued result;
        # re-raise here instead if callers should observe the failure.
        context.result = FunctionResult(
            function=context.function,
            value=f"Error occurred: {str(e)}"
        )

kernel.add_function_invocation_filter(error_handling_filter)

import time
async def performance_filter(context: FunctionInvocationContext, next_filter):
    """Monitor function execution performance."""
    # perf_counter is monotonic and higher-resolution than time.time(),
    # so elapsed measurements are not skewed by clock adjustments
    start_time = time.perf_counter()
    await next_filter(context)
    execution_time = time.perf_counter() - start_time
    print(f"Function {context.function.name} executed in {execution_time:.2f} seconds")
    # Add performance metadata to result; compare against None (not
    # truthiness) so an empty-but-present metadata dict still gets updated
    if context.result and context.result.metadata is not None:
        context.result.metadata["execution_time"] = execution_time

kernel.add_function_invocation_filter(performance_filter)

# Install with Tessl CLI
npx tessl i tessl/pypi-semantic-kernel