Kiln AI is a comprehensive platform for building, evaluating, and deploying AI systems with dataset management, model fine-tuning, RAG, and evaluation capabilities.
Running AI tasks with model adapters, including configuration, streaming support, and output formatting. Adapters provide a unified interface for executing tasks across different AI providers and models.
Create adapters for executing tasks with specific models and providers.
from kiln_ai.adapters import adapter_for_task
def adapter_for_task(
task,
model_name: str,
provider: str | None = None,
config: dict | None = None
):
"""
Create an adapter for executing a task with a specific model.
Parameters:
- task: Task instance to run
- model_name (str): Name of the model to use (e.g., "gpt_4o", "claude_3_5_sonnet")
- provider (str | None): Provider name (e.g., "openai", "anthropic", "groq")
- config (dict | None): Additional configuration options (temperature, max_tokens, etc.)
Returns:
BaseAdapter instance configured for the task
"""

Abstract base class defining the adapter interface.
class BaseAdapter:
"""
Abstract adapter interface for model execution.
Methods:
- invoke(): Execute task and await the complete result
- stream(): Execute task with streaming response
- validate_config(): Validate adapter configuration
"""
async def invoke(self, input_data: str) -> 'RunOutput':
"""
Execute the task asynchronously and return the complete result.
Parameters:
- input_data (str): Input for the task
Returns:
RunOutput: Execution result with output and metadata
"""
async def stream(self, input_data: str):
"""
Execute task with streaming response.
Parameters:
- input_data (str): Input for the task
Yields:
str: Streaming output chunks
"""
def validate_config(self) -> None:
"""
Validate adapter configuration.
Raises:
ValueError: If configuration is invalid
"""
class AdapterConfig:
"""
Configuration for adapters.
Properties:
- temperature (float | None): Sampling temperature (0-2)
- max_tokens (int | None): Maximum tokens to generate
- top_p (float | None): Nucleus sampling parameter (0-1)
- top_k (int | None): Top-k sampling parameter
- stop (list[str] | None): Stop sequences
- seed (int | None): Random seed for reproducibility
"""

Container for model run results with metadata and usage tracking.
class RunOutput:
"""
Container for model run results.
Properties:
- output (str): Generated output text
- metadata (dict): Additional metadata about the run
- usage (Usage | None): Token usage information
- raw_response (dict | None): Raw response from the model
"""

LiteLLM adapter supporting 100+ models through unified interface.
class LiteLlmAdapter(BaseAdapter):
"""
LiteLLM adapter implementation supporting 100+ models.
Supports:
- OpenAI, Anthropic, Google, Groq, Together AI, and many more
- Streaming and non-streaming modes
- Tool/function calling
- Structured JSON output
"""
async def invoke(self, input_data: str) -> 'RunOutput':
"""
Execute the task asynchronously with LiteLLM and return the complete result.
Parameters:
- input_data (str): Input for the task
Returns:
RunOutput: Execution result
"""
async def stream(self, input_data: str):
"""
Execute task with streaming response.
Parameters:
- input_data (str): Input for the task
Yields:
str: Streaming output chunks
"""
class LiteLlmConfig:
"""
LiteLLM-specific configuration.
Properties:
- model (str): Model identifier
- provider (str): Provider name
- api_key (str | None): API key for authentication
- base_url (str | None): Custom API base URL
- temperature (float | None): Sampling temperature
- max_tokens (int | None): Maximum tokens
- top_p (float | None): Nucleus sampling
- stop (list[str] | None): Stop sequences
"""

Get provider-specific configuration for LiteLLM.
def litellm_core_provider_config(provider_name: str):
"""
Get LiteLLM provider configuration.
Parameters:
- provider_name (str): Provider identifier
Returns:
LiteLlmCoreConfig: Provider configuration with API settings
"""
class LiteLlmCoreConfig:
"""
LiteLLM core configuration.
Properties:
- api_key (str | None): API key
- api_base (str | None): Base URL for API
- timeout (int | None): Request timeout in seconds
"""

Utilities for working with model providers and configurations.
def get_config_value(key: str, default=None):
"""
Get configuration value from Kiln config.
Parameters:
- key (str): Configuration key
- default: Default value if key not found
Returns:
Any: Configuration value
"""
def check_provider_warnings(provider: str, model: str) -> list:
"""
Check for provider capability warnings.
Parameters:
- provider (str): Provider name
- model (str): Model identifier
Returns:
list[ModelProviderWarning]: List of warnings
"""
def builtin_model_from(model_id: str):
"""
Get built-in model information.
Parameters:
- model_id (str): Model identifier
Returns:
KilnModel | None: Model definition or None if not found
"""
def core_provider(provider: str) -> str:
"""
Get core provider name from provider identifier.
Parameters:
- provider (str): Provider identifier (may include custom prefix)
Returns:
str: Core provider name
"""
def parse_custom_model_id(model_id: str) -> tuple:
"""
Parse custom model identifier.
Parameters:
- model_id (str): Custom model ID in format "provider::model"
Returns:
tuple[str, str]: (provider, model) tuple
"""
def kiln_model_provider_from(provider_name: str):
"""
Get model provider instance.
Parameters:
- provider_name (str): Provider name
Returns:
KilnModelProvider: Provider instance
"""
def lite_llm_provider_model(provider: str, model: str) -> str:
"""
Format model identifier for LiteLLM.
Parameters:
- provider (str): Provider name
- model (str): Model identifier
Returns:
str: LiteLLM-formatted model string
"""
def finetune_from_id(finetune_id: str, parent_task):
"""
Load fine-tune by ID.
Parameters:
- finetune_id (str): Fine-tune identifier
- parent_task: Parent task instance
Returns:
Finetune: Fine-tune instance
"""
def finetune_provider_model(finetune) -> tuple:
"""
Get provider and model for fine-tune.
Parameters:
- finetune: Finetune instance
Returns:
tuple[str, str]: (provider, model) tuple
"""
def get_model_and_provider(
model_name: str | None,
provider: str | None,
finetune_id: str | None,
task
) -> tuple:
"""
Resolve model and provider from parameters.
Parameters:
- model_name (str | None): Model name
- provider (str | None): Provider name
- finetune_id (str | None): Fine-tune ID
- task: Task instance
Returns:
tuple[KilnModel, str]: (model, provider) tuple
"""
def provider_name_from_id(provider_id: str) -> str:
"""
Extract provider name from identifier.
Parameters:
- provider_id (str): Provider identifier
Returns:
str: Provider name
"""
def lite_llm_core_config_for_provider(provider: str):
"""
Get LiteLLM core config for provider.
Parameters:
- provider (str): Provider name
Returns:
LiteLlmCoreConfig: Core configuration
"""
class ModelProviderWarning:
"""
Warning about provider capabilities.
Properties:
- message (str): Warning message
- severity (str): Warning severity level
"""

Format messages for chat-based model APIs.
class ChatFormatter:
"""
Format messages for chat APIs.
Methods:
- format(): Format single message
- format_messages(): Format message list
"""
def format(self, content: str, role: str = "user") -> dict:
"""
Format single message.
Parameters:
- content (str): Message content
- role (str): Message role (user, assistant, system)
Returns:
dict: Formatted message
"""
def format_messages(self, messages: list) -> list:
"""
Format message list.
Parameters:
- messages (list): List of ChatMessage instances
Returns:
list[dict]: Formatted messages
"""
class ChatMessage:
"""
Single chat message.
Properties:
- role (str): Message role (user, assistant, system)
- content (str): Message content
"""
class ChatStrategy:
"""
Chat formatting strategy.
Values:
- openai: OpenAI chat format
- anthropic: Anthropic chat format
- generic: Generic chat format
"""
openai = "openai"
anthropic = "anthropic"
generic = "generic"
def get_chat_formatter(strategy: str) -> 'ChatFormatter':
"""
Get formatter instance for strategy.
Parameters:
- strategy (str): Chat strategy name
Returns:
ChatFormatter: Formatter instance
"""

from kiln_ai.datamodel import Task
from kiln_ai.adapters import adapter_for_task
# Create or load a task
task = Task(
name="summarizer",
instruction="Summarize the following text concisely."
)
# Create adapter with specific model
adapter = adapter_for_task(
task,
model_name="gpt_4o",
provider="openai",
config={
"temperature": 0.7,
"max_tokens": 500
}
)
# Execute task
input_text = "Long article text here..."
result = await adapter.invoke(input_text)
print(f"Summary: {result.output}")
print(f"Tokens used: {result.usage.total_tokens}")

from kiln_ai.datamodel import Task
from kiln_ai.adapters import adapter_for_task
task = Task(
name="story_generator",
instruction="Write a creative story about the given topic."
)
adapter = adapter_for_task(task, model_name="claude_3_5_sonnet", provider="anthropic")
# Stream response
async for chunk in adapter.stream("space exploration"):
print(chunk, end="", flush=True)

from kiln_ai.datamodel import Task
from kiln_ai.adapters import adapter_for_task
task = Task.load_from_file("path/to/task.kiln")
# Test same task with different models
models = [
("gpt_4o", "openai"),
("claude_3_5_sonnet", "anthropic"),
("llama_3_1_8b", "groq")
]
input_data = "Test input"
for model_name, provider in models:
adapter = adapter_for_task(task, model_name=model_name, provider=provider)
result = await adapter.invoke(input_data)
print(f"{model_name}: {result.output}")

from kiln_ai.adapters import adapter_for_task
# Advanced configuration
config = {
"temperature": 0.9,
"max_tokens": 2000,
"top_p": 0.95,
"stop": ["END", "STOP"],
"seed": 42 # For reproducibility
}
adapter = adapter_for_task(
task,
model_name="gpt_4o",
provider="openai",
config=config
)
result = await adapter.invoke("Generate creative content")

from kiln_ai.datamodel import Task
from kiln_ai.adapters import adapter_for_task
import json
# Task with JSON schema
task = Task(
name="data_extractor",
instruction="Extract structured information from the text.",
output_json_schema=json.dumps({
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"},
"email": {"type": "string"}
},
"required": ["name", "email"]
})
)
adapter = adapter_for_task(task, model_name="gpt_4o", provider="openai")
result = await adapter.invoke("John Doe is 30 years old. Email: john@example.com")
data = json.loads(result.output)
print(f"Name: {data['name']}, Email: {data['email']}")

from kiln_ai.adapters import adapter_for_task
from kiln_ai.adapters.provider_tools import check_provider_warnings
task = Task.load_from_file("path/to/task.kiln")
# Check for warnings before running
warnings = check_provider_warnings("openai", "gpt_4o")
if warnings:
for warning in warnings:
print(f"Warning: {warning.message}")
# Create adapter with error handling
try:
adapter = adapter_for_task(task, model_name="gpt_4o", provider="openai")
adapter.validate_config()
result = await adapter.invoke("input text")
except ValueError as e:
print(f"Configuration error: {e}")
except Exception as e:
print(f"Execution error: {e}")

from kiln_ai.datamodel import Task, Finetune
from kiln_ai.adapters import adapter_for_task
# Load task with fine-tuned model
task = Task.load_from_file("path/to/task.kiln")
# Use fine-tuned model
adapter = adapter_for_task(
task,
model_name=None,
provider=None,
config={"finetune_id": "ft-abc123"}
)
result = await adapter.invoke("input for fine-tuned model")

Install with Tessl CLI
npx tessl i tessl/pypi-kiln-ai