Python client to interact with Aleph Alpha API endpoints
Quality: Pending — a best-practices review has not yet been performed.
Impact: Pending — no eval scenarios have been run.
Conversational AI interface with message handling, streaming, and structured output support. Provides a natural conversational interface with support for system messages, multimodal content, and real-time streaming responses.
Configure conversational interactions with role-based messaging and response controls.
@dataclass
class ChatRequest:
    """
    Request configuration for chat completion.

    Attributes:
        model: Model name to use for chat.
        messages: Conversation history as a sequence of messages.
        maximum_tokens: Maximum number of tokens to generate in the response.
        temperature: Sampling temperature for response generation.
        top_k: Top-k sampling parameter.
        top_p: Top-p/nucleus sampling parameter.
        stream_options: Configuration for streaming responses.
        steering_concepts: Concept IDs for content steering.
        response_format: Structured output format specification.
    """

    model: str
    messages: Sequence[Union[Message, TextMessage]]
    maximum_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_k: Optional[int] = None
    top_p: Optional[float] = None
    stream_options: Optional[StreamOptions] = None
    steering_concepts: Optional[List[str]] = None
    response_format: Optional[ResponseFormat] = None

    def to_json(self) -> Mapping[str, Any]:
        """Serialize the request to a JSON-compatible mapping."""

# Flexible message structures supporting both simple text and rich multimodal content.
@dataclass
class Message:
    """
    Chat message with a role and multimodal content.

    Attributes:
        role: Message role (user, assistant, system).
        content: Text string, or a mixed list of text and images.
    """

    role: Role
    content: Union[str, List[Union[str, Image]]]

    def to_json(self) -> Mapping[str, Any]:
        """Serialize the message to a JSON-compatible mapping."""
@dataclass
class TextMessage:
    """
    Text-only chat message.

    Attributes:
        role: Message role (user, assistant, system).
        content: Text content only.
    """

    role: Role
    content: str

    @staticmethod
    def from_json(json: Dict[str, Any]) -> TextMessage:
        """Create a TextMessage from JSON data."""

    def to_json(self) -> Mapping[str, Any]:
        """Serialize the message to a JSON-compatible mapping."""

# Response structure containing generated assistant messages with completion metadata.
@dataclass
class ChatResponse:
    """
    Response from chat completion.

    Attributes:
        finish_reason: Why generation stopped.
        message: Generated assistant response message.
    """

    finish_reason: FinishReason
    message: TextMessage

    @staticmethod
    def from_json(json: Dict[str, Any]) -> ChatResponse:
        """Create a ChatResponse from JSON data."""

# Enumeration defining the different participant roles in a conversation.
class Role(str, Enum):
    """Participant roles in a conversation."""

    User = "user"  # User/human messages
    Assistant = "assistant"  # AI assistant responses
    System = "system"  # System instructions/context

# Enumerations and classes for controlling chat completion behavior.
class FinishReason(str, Enum):
    """Reason why chat generation stopped."""

    Stop = "stop"  # Natural completion or stop sequence
    Length = "length"  # Maximum length reached
    ContentFilter = "content_filter"  # Content filtering triggered
@dataclass
class StreamOptions:
    """
    Configuration for streaming responses.

    Attributes:
        include_usage: Whether to include token usage statistics in the stream.
    """

    include_usage: bool
@dataclass
class Usage:
    """
    Token usage statistics.

    Attributes:
        completion_tokens: Tokens used in the generated response.
        prompt_tokens: Tokens used in the input messages.
        total_tokens: Total tokens used (prompt + completion).
    """

    completion_tokens: int
    prompt_tokens: int
    total_tokens: int

# Generate chat responses using the synchronous client.
def chat(self, request: ChatRequest, model: str) -> ChatResponse:
    """
    Generate a chat response (synchronous client).

    Parameters:
        request: Chat configuration with message history.
        model: Model name to use for generation.

    Returns:
        ChatResponse with the assistant message.
    """

# Generate chat responses asynchronously with optional real-time streaming.
async def chat(self, request: ChatRequest, model: str) -> ChatResponse:
    """
    Generate a chat response (asynchronous client).

    Parameters:
        request: Chat configuration with message history.
        model: Model name to use for generation.

    Returns:
        ChatResponse with the assistant message.
    """
async def chat_with_streaming(
    self,
    request: ChatRequest,
    model: str,
) -> AsyncGenerator[Union[ChatStreamChunk, Usage, FinishReason], None]:
    """
    Generate a chat response with real-time streaming.

    Parameters:
        request: Chat configuration with message history.
        model: Model name to use for generation.

    Yields:
        Stream chunks, usage statistics, and the finish reason.
    """

# Data structures for handling streaming chat responses.
@dataclass
class ChatStreamChunk:
    """
    Streaming chat response chunk.

    Attributes:
        content: Partial response content.
        role: Role of the message (only present in the first chunk).
    """

    content: str
    role: Optional[Role]

# Basic chat interactions and advanced features:
# Usage examples for the chat API. Note: AsyncClient, ChatStreamChunk, and
# Usage were added to the import list — the streaming example below uses them.
from aleph_alpha_client import (
    Client, AsyncClient, ChatRequest, Message, TextMessage, Role,
    FinishReason, StreamOptions, ChatStreamChunk, Usage
)

client = Client(token="your-api-token")

# Simple chat conversation
messages = [
    TextMessage(role=Role.System, content="You are a helpful AI assistant."),
    TextMessage(role=Role.User, content="What is the capital of Spain?")
]
request = ChatRequest(
    model="luminous-extended",
    messages=messages,
    maximum_tokens=100,
    temperature=0.7
)
response = client.chat(request, model="luminous-extended")
print(f"Assistant: {response.message.content}")
print(f"Finish reason: {response.finish_reason}")

# Multi-turn conversation
conversation = [
    TextMessage(role=Role.System, content="You are a helpful coding assistant."),
    TextMessage(role=Role.User, content="How do I reverse a string in Python?"),
]

# Get first response
request = ChatRequest(model="luminous-extended", messages=conversation)
response = client.chat(request, model="luminous-extended")

# Add assistant response to conversation
conversation.append(response.message)

# Continue conversation
conversation.append(TextMessage(
    role=Role.User,
    content="Can you show me a more efficient way?"
))
request = ChatRequest(model="luminous-extended", messages=conversation)
response = client.chat(request, model="luminous-extended")
print(f"Assistant: {response.message.content}")

# Multimodal chat with images
from aleph_alpha_client import Image

image = Image.from_file("chart.png")
multimodal_message = Message(
    role=Role.User,
    content=["What trends do you see in this chart?", image]
)
request = ChatRequest(
    model="luminous-extended",
    messages=[multimodal_message],
    maximum_tokens=200
)
response = client.chat(request, model="luminous-extended")
print(response.message.content)

# Streaming chat (async)
import asyncio


async def streaming_chat():
    async with AsyncClient(token="your-api-token") as client:
        messages = [
            TextMessage(role=Role.User, content="Tell me a story about robots.")
        ]
        request = ChatRequest(
            model="luminous-extended",
            messages=messages,
            maximum_tokens=300,
            temperature=0.8,
            stream_options=StreamOptions(include_usage=True)
        )
        print("Assistant: ", end="", flush=True)
        # The stream yields content chunks, then usage stats (if requested),
        # then the finish reason — dispatch on the item type.
        async for item in client.chat_with_streaming(request, "luminous-extended"):
            if isinstance(item, ChatStreamChunk):
                print(item.content, end="", flush=True)
            elif isinstance(item, Usage):
                print(f"\nTokens used: {item.total_tokens}")
            elif isinstance(item, FinishReason):
                print(f"\nFinished: {item}")

asyncio.run(streaming_chat())

# Structured output (if supported)
from aleph_alpha_client import JSONSchema

# Define response schema
schema = JSONSchema(
    name="story_analysis",
    description="Analysis of a story",
    schema={
        "type": "object",
        "properties": {
            "theme": {"type": "string"},
            "characters": {"type": "array", "items": {"type": "string"}},
            "rating": {"type": "integer", "minimum": 1, "maximum": 10}
        },
        "required": ["theme", "characters", "rating"]
    }
)
request = ChatRequest(
    model="luminous-extended",
    messages=[
        TextMessage(role=Role.User, content="Analyze this story: [story text]")
    ],
    response_format=schema
)
response = client.chat(request, model="luminous-extended")
# Response will be structured JSON matching the schema

# Install with Tessl CLI
npx tessl i tessl/pypi-aleph-alpha-client