Model Context Protocol SDK for building MCP servers and clients in Python
—
Low-level server implementation providing full control over MCP protocol handling. The Server class offers decorator-based request handlers, custom lifecycle management, and fine-grained control over protocol capabilities and session management.
Core server class for implementing MCP protocol handlers with full control over request processing and capabilities.
class Server:
    """Low-level MCP server with decorator-based protocol handlers.

    API-reference stub: method bodies are intentionally omitted; only
    signatures and contracts are documented here.
    """

    def __init__(
        self,
        name: str,
        version: str | None = None,
        instructions: str | None = None,
        lifespan: Callable = default_lifespan,
    ):
        """
        Initialize a low-level MCP server.
        Parameters:
        - name: Server name for identification
        - version: Server version string
        - instructions: Server instructions/description
        - lifespan: Server lifespan management function
        """

    def list_tools(self) -> Callable:
        """
        Decorator for tool listing handler.
        Returns:
        Decorator function that expects handler returning list[Tool]
        """

    def call_tool(self) -> Callable:
        """
        Decorator for tool execution handler.
        Returns:
        Decorator function that expects handler with (name: str, arguments: dict) -> list[ContentBlock]
        """

    def list_resources(self) -> Callable:
        """
        Decorator for resource listing handler.
        Returns:
        Decorator function that expects handler returning list[Resource]
        """

    def read_resource(self) -> Callable:
        """
        Decorator for resource reading handler.
        Returns:
        Decorator function that expects handler with (uri: AnyUrl) -> list[ContentBlock]
        """

    def subscribe_resource(self) -> Callable:
        """
        Decorator for resource subscription handler.
        Returns:
        Decorator function that expects handler with (uri: AnyUrl) -> None
        """

    def unsubscribe_resource(self) -> Callable:
        """
        Decorator for resource unsubscription handler.
        Returns:
        Decorator function that expects handler with (uri: AnyUrl) -> None
        """

    def list_prompts(self) -> Callable:
        """
        Decorator for prompt listing handler.
        Returns:
        Decorator function that expects handler returning list[Prompt]
        """

    def get_prompt(self) -> Callable:
        """
        Decorator for prompt retrieval handler.
        Returns:
        Decorator function that expects handler with (name: str, arguments: dict) -> GetPromptResult
        """

    def set_logging_level(self) -> Callable:
        """
        Decorator for logging level setting handler.
        Returns:
        Decorator function that expects handler with (level: LoggingLevel) -> None
        """

    def completion(self) -> Callable:
        """
        Decorator for completion handler.
        Returns:
        Decorator function that expects handler with completion request -> list[Completion]
        """

    async def run(
        self,
        read_stream: MemoryObjectReceiveStream,
        write_stream: MemoryObjectSendStream,
        options: InitializationOptions
    ) -> None:
        """
        Run the server with provided streams and options.
        Parameters:
        - read_stream: Stream for receiving client messages
        - write_stream: Stream for sending responses to client
        - options: Server initialization options
        """

    async def create_initialization_options(self) -> InitializationOptions:
        """
        Create initialization options for the server.
        Returns:
        InitializationOptions with server capabilities
        """

# Configuration and options classes for server initialization and capabilities.
class InitializationOptions:
    """Options passed to Server.run() describing the server to the client."""

    def __init__(
        self,
        server_name: str,
        server_version: str,
        capabilities: ServerCapabilities,
        instructions: str | None = None,
    ):
        """
        Server initialization options.
        Parameters:
        - server_name: Name of the server
        - server_version: Version of the server
        - capabilities: Server capabilities declaration
        - instructions: Server instructions/description
        """
class NotificationOptions:
    """Flags declaring which change/progress notifications the server emits.

    API-reference stub: the constructor body is intentionally empty.
    """

    def __init__(
        self,
        tools_changed: bool = True,
        resources_changed: bool = True,
        prompts_changed: bool = True,
        logging: bool = True,
        progress: bool = True,
    ):
        """
        Notification capability options.
        Parameters:
        - tools_changed: Enable tool change notifications
        - resources_changed: Enable resource change notifications
        - prompts_changed: Enable prompt change notifications
        - logging: Enable logging notifications
        - progress: Enable progress notifications
        """

# Low-level context access for request handling and session management.
def request_ctx() -> RequestContext:
    """Return the RequestContext for the request currently being handled."""
class RequestContext:
    """Per-request state: identifiers, session, and client-notification helpers.

    API-reference stub: bodies are intentionally omitted.
    """

    @property
    def request_id(self) -> str:
        """Current request identifier."""

    @property
    def session(self) -> ServerSession:
        """Current server session."""

    @property
    def client_info(self) -> Implementation:
        """Client implementation information."""

    @property
    def server_info(self) -> Implementation:
        """Server implementation information."""

    async def send_notification(
        self,
        method: str,
        params: dict[str, Any] | None = None
    ) -> None:
        """
        Send notification to client.
        Parameters:
        - method: Notification method name
        - params: Notification parameters
        """

    async def send_progress_notification(
        self,
        progress_token: ProgressToken,
        progress: float,
        total: float | None = None
    ) -> None:
        """
        Send progress notification to client.
        Parameters:
        - progress_token: Progress tracking token
        - progress: Current progress value
        - total: Total expected value
        """

    async def send_logging_message(
        self,
        level: LoggingLevel,
        message: str,
        logger: str | None = None
    ) -> None:
        """
        Send log message to client.
        Parameters:
        - level: Log level
        - message: Log message
        - logger: Logger name
        """

# --- usage example: basic server ---
from mcp.server import Server
from mcp import Tool, Resource, TextContent
import asyncio
# Create server instance
server = Server("basic-server", version="1.0.0")

# Tool management: the single "echo" tool this server exposes
available_tools = [
    Tool(
        name="echo",
        description="Echo the input message",
        inputSchema={
            "type": "object",
            "properties": {"message": {"type": "string"}},
            "required": ["message"],
        },
    )
]

@server.list_tools()
async def list_tools():
    """Return list of available tools."""
    return available_tools
@server.call_tool()
async def call_tool(name: str, arguments: dict):
    """Execute the named tool; raise ValueError for unknown names."""
    if name != "echo":
        raise ValueError(f"Unknown tool: {name}")
    # Missing "message" argument degrades to an empty echo rather than an error.
    message = arguments.get("message", "")
    return [TextContent(type="text", text=f"Echo: {message}")]
# Resource management
available_resources = [
    Resource(
        uri="memory://stats",
        name="Server Statistics",
        description="Current server statistics",
    )
]

@server.list_resources()
async def list_resources():
    """Return list of available resources."""
    return available_resources
@server.read_resource()
async def read_resource(uri):
    """Serve the memory://stats resource; raise for anything else."""
    if str(uri) != "memory://stats":
        raise ValueError(f"Unknown resource: {uri}")
    stats = {
        "uptime": "1 hour",
        "requests": 42,
        "tools_called": 12,
    }
    lines = [f"{k}: {v}" for k, v in stats.items()]
    return [TextContent(type="text", text="\n".join(lines))]
# Run server with stdio transport
async def main():
    """Wire the basic server to stdio streams and run until shutdown."""
    from mcp.server import stdio_server
    from mcp.server.models import InitializationOptions
    from mcp import ServerCapabilities, ToolsCapability, ResourcesCapability

    # Advertise tool/resource list-change notifications and resource subscribe.
    capabilities = ServerCapabilities(
        tools=ToolsCapability(listChanged=True),
        resources=ResourcesCapability(listChanged=True, subscribe=True),
    )
    options = InitializationOptions(
        server_name="basic-server",
        server_version="1.0.0",
        capabilities=capabilities,
    )
    async with stdio_server() as (read, write):
        await server.run(read, write, options)

if __name__ == "__main__":
    asyncio.run(main())

# --- usage example: advanced server (completion, logging, prompts) ---
from mcp.server import Server, request_ctx
from mcp import *
import asyncio
server = Server("advanced-server")

# Custom completion handler
@server.completion()
async def handle_completion(request):
    """Handle completion requests for resources and prompts.

    NOTE: an unused `ctx = request_ctx()` call from the original was removed.
    """
    if request.ref.type == "resource":
        # Complete resource URIs
        if request.ref.uri.startswith("file://"):
            # Return file path completions
            return [
                Completion(
                    values=["file:///tmp/example.txt", "file:///home/user/doc.md"],
                    total=2,
                )
            ]
    # Default: no completions available for this reference.
    return [Completion(values=[], total=0)]
# Custom logging level handler
@server.set_logging_level()
async def set_logging_level(level):
    """Record the client's requested logging level and confirm it back."""
    context = request_ctx()
    print(f"Client {context.client_info.name} set logging level to {level}")
    # Send confirmation
    await context.send_logging_message(
        LoggingLevel.INFO,
        f"Logging level set to {level}",
    )
# Prompt with arguments
prompts = [
    Prompt(
        name="code_review",
        description="Generate code review checklist",
        arguments=[
            PromptArgument(
                name="language",
                description="Programming language",
                required=True,
            ),
            PromptArgument(
                name="complexity",
                description="Code complexity level",
                required=False,
            ),
        ],
    )
]

@server.list_prompts()
async def list_prompts():
    """Return available prompts."""
    return prompts
@server.get_prompt()
async def get_prompt(name: str, arguments: dict):
    """Handle prompt generation."""
    if name != "code_review":
        raise ValueError(f"Unknown prompt: {name}")
    # Optional arguments fall back to sensible defaults.
    language = arguments.get("language", "Python")
    complexity = arguments.get("complexity", "medium")
    prompt_text = f"""Code Review Checklist for {language} ({complexity} complexity):
1. Code Style and Formatting
- Consistent indentation and spacing
- Meaningful variable and function names
- Appropriate comments and documentation
2. Logic and Functionality
- Code achieves intended purpose
- Edge cases are handled
- Error handling is appropriate
3. Performance Considerations
- Efficient algorithms and data structures
- Resource usage optimization
- Scalability considerations
4. Security Review
- Input validation
- Authentication and authorization
- Data sanitization
"""
    return GetPromptResult(
        description=f"Code review checklist for {language}",
        messages=[
            PromptMessage(
                role=Role.user,
                content=TextContent(type="text", text=prompt_text),
            )
        ],
    )
# Resource subscription handling
@server.subscribe_resource()
async def subscribe_resource(uri):
    """Handle resource subscription."""
    context = request_ctx()
    print(f"Client subscribed to resource: {uri}")
    # Send initial notification
    await context.send_notification(
        "notifications/resources/updated",
        {"uri": str(uri), "reason": "subscribed"},
    )
@server.unsubscribe_resource()
async def unsubscribe_resource(uri):
    """Handle resource unsubscription.

    NOTE: the original assigned `ctx = request_ctx()` but never used it;
    the dead call has been removed.
    """
    print(f"Client unsubscribed from resource: {uri}")
async def main():
    """Run the advanced server over stdio with auto-derived options."""
    from mcp.server import stdio_server

    options = await server.create_initialization_options()
    async with stdio_server() as (read, write):
        await server.run(read, write, options)

if __name__ == "__main__":
    asyncio.run(main())

# --- usage example: progress reporting ---
from mcp.server import Server, request_ctx
from mcp import TextContent
import asyncio
server = Server("progress-server")

@server.call_tool()
async def call_tool(name: str, arguments: dict):
    """Handle tool calls with progress tracking."""
    context = request_ctx()
    if name != "process_data":
        return [TextContent(type="text", text="Unknown tool")]
    items = arguments.get("items", [])
    total = len(items)
    results = []
    for index, item in enumerate(items):
        # Report progress before handling each item.
        await context.send_progress_notification(
            progress_token=context.request_id,
            progress=index,
            total=total,
        )
        # Simulate processing
        await asyncio.sleep(0.1)
        results.append(f"Processed: {item}")
    # Final progress update: all items done.
    await context.send_progress_notification(
        progress_token=context.request_id,
        progress=total,
        total=total,
    )
    return [TextContent(
        type="text",
        text=f"Completed processing {total} items: {results}",
    )]
@server.list_tools()
async def list_tools():
    """Return tools that support progress tracking."""
    # FIX: this example imported only TextContent from mcp, so `Tool` was
    # undefined and this handler raised NameError; import it locally.
    from mcp import Tool

    return [
        Tool(
            name="process_data",
            description="Process data items with progress tracking",
            inputSchema={
                "type": "object",
                "properties": {
                    "items": {
                        "type": "array",
                        "items": {"type": "string"},
                    }
                },
                "required": ["items"],
            },
        )
    ]
async def main():
    """Run the progress server over stdio with auto-derived options."""
    from mcp.server import stdio_server

    options = await server.create_initialization_options()
    async with stdio_server() as (read, write):
        await server.run(read, write, options)

if __name__ == "__main__":
    asyncio.run(main())

# --- usage example: custom lifespan ---
from mcp.server import Server
import asyncio
import aiofiles
# Custom lifespan function
async def custom_lifespan(server_instance):
    """Custom server lifespan with setup and cleanup."""
    print("Server starting up...")
    # Setup: Initialize resources, connections, etc.
    server_instance._data_file = await aiofiles.open("server_data.json", "w")
    server_instance._active_connections = set()
    try:
        # Hand control to the running server.
        yield
    finally:
        # Cleanup: Close resources, save state, etc.
        print("Server shutting down...")
        await server_instance._data_file.close()
        print(f"Closed {len(server_instance._active_connections)} connections")
# Create server with custom lifespan
server = Server(
    "lifecycle-server",
    version="1.0.0",
    lifespan=custom_lifespan,
)
@server.list_tools()
async def list_tools():
    """Return simple tool list."""
    # FIX: this example never imported `Tool`, so the handler raised
    # NameError; import it locally from mcp.
    from mcp import Tool

    return [
        Tool(
            name="status",
            description="Get server status",
            inputSchema={"type": "object", "properties": {}},
        )
    ]
@server.call_tool()
async def call_tool(name: str, arguments: dict):
    """Handle tool calls with server state access."""
    # FIX: this example never imported `TextContent`, so the handler raised
    # NameError; import it locally from mcp.
    from mcp import TextContent

    if name == "status":
        # _active_connections is created by custom_lifespan; default to empty
        # if the lifespan has not run yet.
        connections = len(getattr(server, '_active_connections', []))
        return [TextContent(
            type="text",
            text=f"Server is running with {connections} active connections"
        )]
    return [TextContent(type="text", text="Unknown tool")]
async def main():
    """Run the lifecycle server over stdio with auto-derived options."""
    from mcp.server import stdio_server

    options = await server.create_initialization_options()
    async with stdio_server() as (read, write):
        await server.run(read, write, options)

if __name__ == "__main__":
    asyncio.run(main())

# Install with Tessl CLI
npx tessl i tessl/pypi-mcp