Python client to interact with Aleph Alpha API endpoints
—
Quality: Pending — a best-practices review has not yet been performed.
Impact: Pending — no eval scenarios have been run.
Generate explanations for model predictions showing which parts of the input influenced the output, with configurable granularity and postprocessing. Provides interpretability features to understand how models make decisions across text, image, and multimodal inputs.
Configure explanation generation with flexible granularity controls and postprocessing options.
class ExplanationRequest:
    """Request for model explanation generation.

    Attributes:
    - prompt: Input prompt to explain (text, image, or multimodal)
    - target: Target text to generate explanations for
    - contextual_control_threshold: Threshold for attention controls
    - control_factor: Factor for attention control strength
    - control_token_overlap: How to handle partial token overlap
    - control_log_additive: Method for applying attention controls
    - prompt_granularity: Level of detail for prompt explanations
    - target_granularity: Level of detail for target explanations
    - postprocessing: Score transformation options
    - normalize: Normalize explanation scores
    """

    prompt: Prompt
    target: str
    # All control/granularity options are optional; None means "server default".
    contextual_control_threshold: Optional[float] = None
    control_factor: Optional[float] = None
    control_token_overlap: Optional[ControlTokenOverlap] = None
    control_log_additive: Optional[bool] = None
    prompt_granularity: Optional[Union[PromptGranularity, str, CustomGranularity]] = None
    target_granularity: Optional[TargetGranularity] = None
    postprocessing: Optional[ExplanationPostprocessing] = None
    normalize: Optional[bool] = None
def explain(self, request: ExplanationRequest, model: str) -> ExplanationResponse:
    """Generate model explanations.

    Parameters:
    - request: Explanation configuration
    - model: Model name to use for explanations

    Returns:
    ExplanationResponse with detailed explanations
    """


# Structured response containing detailed explanations with utility methods
# for coordinate conversion and text integration.
class ExplanationResponse:
    """Response from explanation request.

    Attributes:
    - model_version: Version of model used
    - explanations: List of explanation objects for target segments
    """

    model_version: str
    explanations: List[Explanation]

    def with_image_prompt_items_in_pixels(self, prompt: Prompt) -> ExplanationResponse:
        """Convert image coordinates from normalized to pixel coordinates.

        Parameters:
        - prompt: Original prompt with image dimensions

        Returns:
        ExplanationResponse with pixel coordinates
        """

    def with_text_from_prompt(self, request: ExplanationRequest) -> ExplanationResponse:
        """Add text content to explanations for easier interpretation.

        Parameters:
        - request: Original explanation request

        Returns:
        ExplanationResponse with text content added
        """


# Detailed explanation for each target segment with support for different
# prompt item types.
class Explanation:
    """Explanation for a target text segment.

    Attributes:
    - target: Target text portion being explained
    - items: Explanations for each prompt item type
    """

    target: str
    items: List[Union[
        TextPromptItemExplanation,
        ImagePromptItemExplanation,
        TokenPromptItemExplanation,
        TargetPromptItemExplanation,
    ]]

    def with_image_prompt_items_in_pixels(self, prompt: Prompt) -> Explanation:
        """Convert image coordinates to pixel coordinates."""

    def with_text_from_prompt(self, prompt: Prompt, target: str) -> Explanation:
        """Add text content for easier interpretation."""


# Different explanation types for various prompt content types.
class TextPromptItemExplanation:
    """Explanation for text prompt items.

    Attributes:
    - scores: Importance scores for text segments
    """

    scores: List[Union[TextScore, TextScoreWithRaw]]
class ImagePromptItemExplanation:
    """Explanation for image prompt items.

    Attributes:
    - scores: Importance scores for image regions
    """

    scores: List[ImageScore]

    def in_pixels(self, prompt_item: PromptItem) -> ImagePromptItemExplanation:
        """Convert coordinates to pixel values."""
class TokenPromptItemExplanation:
    """Explanation for token prompt items.

    Attributes:
    - scores: Importance scores for individual tokens
    """

    scores: List[TokenScore]
class TargetPromptItemExplanation:
    """Explanation for target text segments.

    Attributes:
    - scores: Importance scores for target text parts
    """

    scores: List[Union[TargetScore, TargetScoreWithRaw]]


# Detailed scoring structures for different content types with positional
# information.
class TextScore:
    """Importance score for text segment.

    Attributes:
    - start: Starting character index
    - length: Length in characters
    - score: Importance score (higher = more important)
    """

    start: int
    length: int
    score: float
class ImageScore:
    """Importance score for image region.

    Attributes:
    - left: Left coordinate (0-1, normalized)
    - top: Top coordinate (0-1, normalized)
    - width: Width (0-1, normalized)
    - height: Height (0-1, normalized)
    - score: Importance score
    """

    left: float
    top: float
    width: float
    height: float
    score: float
class TokenScore:
    """Importance score for individual token.

    Attributes:
    - score: Importance score for this token
    """

    score: float
class TargetScore:
    """Importance score for target text segment.

    Attributes:
    - start: Starting character index in target
    - length: Length in characters
    - score: Importance score
    """

    start: int
    length: int
    score: float


# Configuration options for explanation detail level and scope.
class PromptGranularity(Enum):
    """Granularity level at which prompt explanations are reported."""

    Token = "token"          # Token-level explanations
    Word = "word"            # Word-level explanations
    Sentence = "sentence"    # Sentence-level explanations
    Paragraph = "paragraph"  # Paragraph-level explanations
class TargetGranularity(Enum):
    """Granularity level at which the target is explained."""

    Complete = "complete"  # Explain complete target
    Token = "token"        # Per-token target explanations
class CustomGranularity:
    """Custom granularity specification.

    Attributes:
    - delimiter: Custom delimiter for text splitting
    """

    delimiter: str
class ExplanationPostprocessing(Enum):
    """Postprocessing transformation applied to explanation scores."""

    Square = "square"      # Square each score
    Absolute = "absolute"  # Absolute value of each score


# Comprehensive examples showing different explanation use cases and
# configurations follow.
# NOTE: TextPromptItemExplanation is added to the import list; it is used in
# the isinstance() check below and was previously missing (NameError).
from aleph_alpha_client import (
    Client, ExplanationRequest, ExplanationResponse,
    Prompt, Text, Image,
    PromptGranularity, TargetGranularity, CustomGranularity,
    ExplanationPostprocessing, TextPromptItemExplanation,
)

client = Client(token="your-api-token")

# Basic text explanation
prompt = Prompt.from_text("The cat sat on the mat and looked around.")
request = ExplanationRequest(
    prompt=prompt,
    target="sat on the mat",
    prompt_granularity=PromptGranularity.Word,
    target_granularity=TargetGranularity.Complete,
)
response = client.explain(request, model="luminous-extended")

# Process explanations: map each (start, length) score back to the original
# prompt text so the importance of each word is readable.
for explanation in response.explanations:
    print(f"Target: '{explanation.target}'")
    for item in explanation.items:
        if isinstance(item, TextPromptItemExplanation):
            for score in item.scores:
                text_segment = prompt.items[0].text[score.start:score.start + score.length]
                print(f" '{text_segment}': {score.score:.3f}")

# Enhanced explanation with text content
enhanced_response = response.with_text_from_prompt(request)
print("Enhanced explanation includes text content")
# Multimodal explanation (text + image)
# Import the item-explanation classes used in the isinstance() checks below;
# they were previously missing (NameError at runtime).
from aleph_alpha_client import TextPromptItemExplanation, ImagePromptItemExplanation

image = Image.from_file("scene.jpg")
multimodal_prompt = Prompt([
    Text.from_text("This image shows a beautiful landscape with"),
    image,
    Text.from_text("mountains in the background."),
])
multimodal_request = ExplanationRequest(
    prompt=multimodal_prompt,
    target="landscape",
    prompt_granularity=PromptGranularity.Word,
    postprocessing=ExplanationPostprocessing.Absolute,
)
multimodal_response = client.explain(multimodal_request, model="luminous-extended")

for explanation in multimodal_response.explanations:
    for item in explanation.items:
        if isinstance(item, TextPromptItemExplanation):
            print("Text explanations:")
            for score in item.scores:
                print(f" Text score: {score.score:.3f}")
        elif isinstance(item, ImagePromptItemExplanation):
            print("Image explanations:")
            for score in item.scores:
                print(f" Region ({score.left:.2f}, {score.top:.2f}) "
                      f"size ({score.width:.2f}x{score.height:.2f}): {score.score:.3f}")

# Convert image coordinates to pixels
pixel_response = multimodal_response.with_image_prompt_items_in_pixels(multimodal_prompt)
print("Converted to pixel coordinates")
# Fine-grained token-level explanation
# TokenPromptItemExplanation is used in the isinstance() check below and was
# previously not imported (NameError at runtime).
from aleph_alpha_client import TokenPromptItemExplanation

token_request = ExplanationRequest(
    prompt=Prompt.from_text("Machine learning revolutionizes data analysis."),
    target="revolutionizes",
    prompt_granularity=PromptGranularity.Token,
    target_granularity=TargetGranularity.Token,
    normalize=True,  # normalized scores are easier to compare across tokens
)
token_response = client.explain(token_request, model="luminous-extended")
for explanation in token_response.explanations:
    print(f"Token-level explanation for: '{explanation.target}'")
    for item in explanation.items:
        if isinstance(item, TokenPromptItemExplanation):
            for i, score in enumerate(item.scores):
                print(f" Token {i}: {score.score:.3f}")
# Custom granularity: split the prompt on a caller-chosen delimiter instead of
# the built-in token/word/sentence/paragraph levels.
custom_request = ExplanationRequest(
    prompt=Prompt.from_text("First clause; second clause; third clause."),
    target="second clause",
    # Each ";"-separated clause becomes one explanation unit.
    prompt_granularity=CustomGranularity(delimiter=";"),
    # Squaring the scores emphasizes the strongest contributions.
    postprocessing=ExplanationPostprocessing.Square,
)
custom_response = client.explain(custom_request, model="luminous-extended")
# Explanation with attention controls: up-weight one span of the prompt and
# inspect how the explanation scores shift in response.
from aleph_alpha_client import TextControl, ControlTokenOverlap

# Attention control covering the word "Important" (chars 0-8).
emphasis = TextControl(
    start=0, length=9,
    factor=2.0,
    token_overlap=ControlTokenOverlap.Complete,
)
controlled_text = Text(
    text="Important information is highlighted here.",
    controls=[emphasis],
)
controlled_request = ExplanationRequest(
    prompt=Prompt([controlled_text]),
    target="highlighted",
    prompt_granularity=PromptGranularity.Word,
    control_factor=1.5,
    control_log_additive=True,
)
controlled_response = client.explain(controlled_request, model="luminous-extended")

# Compare explanations with and without controls
print("Explanation shows impact of attention controls")
# Batch explanation analysis
def analyze_explanations(response: ExplanationResponse, threshold: float = 0.1):
    """Collect text-prompt segments whose importance exceeds ``threshold``.

    Parameters:
    - response: ExplanationResponse returned by ``Client.explain``
    - threshold: Minimum absolute score for a segment to be kept

    Returns:
    List of dicts with keys 'target', 'start', 'length', 'score', sorted by
    descending absolute score.
    """
    important_segments = []
    for explanation in response.explanations:
        for item in explanation.items:
            # Only text items carry (start, length) character spans to report.
            if isinstance(item, TextPromptItemExplanation):
                for score in item.scores:
                    if abs(score.score) > threshold:
                        important_segments.append({
                            'target': explanation.target,
                            'start': score.start,
                            'length': score.length,
                            'score': score.score,
                        })
    return sorted(important_segments, key=lambda x: abs(x['score']), reverse=True)
# Analyze most important segments
important = analyze_explanations(response, threshold=0.05)
print("Most important segments:")
for segment in important[:5]:  # Top 5
    print(f" Score {segment['score']:.3f}: chars {segment['start']}-{segment['start']+segment['length']}")

# Install with Tessl CLI:
npx tessl i tessl/pypi-aleph-alpha-client