CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-llama-index

Interface between LLMs and your data for building retrieval-augmented generation (RAG) applications

Pending
Overview
Eval results
Files

docs/agents-workflows.md

Agents & Workflows

Multi-agent systems and workflow orchestration for complex reasoning tasks, tool usage, and multi-step problem solving with event-driven execution.

Capabilities

Base Agent System

Core agent classes that combine workflow execution with LLM interactions for tool-calling and reasoning tasks.

class BaseWorkflowAgent:
    """
    Base class for all workflow-based agents.

    NOTE(review): API reference stub — method bodies are elided with
    ``...``; only signatures and contracts are documented here.

    Args:
        name: Agent identifier
        description: Agent capabilities description
        system_prompt: System prompt for LLM interactions
        tools: List of tools available to agent
        llm: Language model instance
        initial_state: Initial workflow state
        streaming: Enable streaming responses
        output_cls: Structured output class
    """
    def __init__(
        self,
        name: str = "Agent",
        description: str = "An agent that can perform a task",
        system_prompt: str | None = None,
        tools: list | None = None,
        llm=None,  # language model instance; concrete type not shown in this doc
        initial_state=None,  # presumably a dict of workflow state — verify (cf. AgentWorkflow example)
        streaming: bool = True,
        output_cls=None,  # class used for structured output, when any
        **kwargs
    ): ...

    def run(
        self,
        user_msg=None,  # user input; the examples below pass a plain str
        chat_history=None,
        memory=None,  # e.g. a ChatMemoryBuffer, persisted across runs
        max_iterations: int | None = None,
        **kwargs
    ):
        """
        Execute agent workflow.
        
        Args:
            user_msg: User input message
            chat_history: Previous conversation history
            memory: Memory instance for persistence
            max_iterations: Maximum reasoning steps
            
        Returns:
            WorkflowHandler: Async handler for results and streaming
        """

    async def get_tools(self, input_str: str | None = None):
        """Get available tools, optionally filtered by input."""

ReAct Agent

Reasoning-Action agent implementing the ReAct pattern for systematic problem solving with tool usage.

class ReActAgent(BaseWorkflowAgent):
    """
    ReAct agent with thought-action-observation loop.

    Alternates reasoning ("thought"), tool use ("action"), and reading
    tool results ("observation") until an answer is produced.

    Args:
        reasoning_key: Context key for storing current reasoning
        output_parser: Parser for ReAct format responses
        formatter: Chat message formatter
        **kwargs: BaseWorkflowAgent arguments
    """
    def __init__(
        self,
        reasoning_key: str = "current_reasoning",
        output_parser=None,  # parses the LLM's ReAct-formatted replies
        formatter=None,  # formats chat messages for the ReAct prompt
        **kwargs
    ): ...

Usage Example:

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool

def add(a: int, b: int) -> int:
    """Add two numbers."""
    # Explicit intermediate instead of an inline return expression.
    total = a + b
    return total

def search_web(query: str) -> str:
    """Search the web for information."""
    # Stub: echo the query back in a canned result string.
    return "Search results for " + query

agent = ReActAgent(
    name="research_assistant",
    description="Performs research and calculations",
    system_prompt="You are a helpful research assistant.",
    tools=[
        FunctionTool.from_defaults(fn=add),
        FunctionTool.from_defaults(fn=search_web),
    ],
    llm=llm
)

# Execute with automatic thought-action-observation loop
result = await agent.run("Search for Python tutorials and count how many you find")
print(result.response.content)

Function Calling Agent

Function calling agent optimized for LLMs with native function calling support, enabling parallel tool execution.

class FunctionAgent(BaseWorkflowAgent):
    """
    Function calling agent for function-calling LLMs.

    Requires an LLM with native function/tool-calling support; may issue
    several tool calls in one step when allow_parallel_tool_calls is True.

    Args:
        scratchpad_key: Context key for scratchpad storage
        initial_tool_choice: Force initial tool selection
        allow_parallel_tool_calls: Enable parallel tool execution
        **kwargs: BaseWorkflowAgent arguments
    """
    def __init__(
        self,
        scratchpad_key: str = "scratchpad",
        initial_tool_choice=None,  # presumably a tool name to call first — verify
        allow_parallel_tool_calls: bool = True,
        **kwargs
    ): ...

Usage Example:

from llama_index.core.agent import FunctionAgent
from llama_index.llms.openai import OpenAI

def get_weather(location: str) -> str:
    """Get weather for a location."""
    # Stub: always reports sunny weather for the given location.
    return "The weather in {} is sunny".format(location)

def get_news(topic: str) -> str:
    """Get news about a topic."""
    # Stub: canned headline text keyed on the topic.
    return "Latest news about " + topic

agent = FunctionAgent(
    name="information_agent",
    description="Provides weather and news information",
    tools=[
        FunctionTool.from_defaults(fn=get_weather),
        FunctionTool.from_defaults(fn=get_news),
    ],
    llm=OpenAI(model="gpt-4"),  # Function calling LLM required
    allow_parallel_tool_calls=True
)

result = await agent.run("What's the weather in NYC and latest tech news?")

Code Execution Agent

Agent capable of generating and executing Python code within a controlled environment for data analysis and computation.

class CodeActAgent(BaseWorkflowAgent):
    """
    Agent that can execute code within <execute> tags.

    The executor is caller-supplied, so sandboxing/safety of the generated
    code is the caller's responsibility.

    Args:
        code_execute_fn: Function to execute generated code (required)
        scratchpad_key: Context key for scratchpad storage
        code_act_system_prompt: System prompt for code generation
        **kwargs: BaseWorkflowAgent arguments
    """
    def __init__(
        self,
        code_execute_fn,  # e.g. an async callable taking the code string — see the usage example
        scratchpad_key: str = "scratchpad",
        code_act_system_prompt=None,
        **kwargs
    ): ...

Usage Example:

import os
import subprocess
import sys
import tempfile
from typing import Any, Dict

async def code_execute_fn(code: str) -> Dict[str, Any]:
    """
    Execute Python source in a subprocess and return its output.

    The code is written to a temporary .py file and run with the current
    interpreter under a 30-second timeout.

    Args:
        code: Python source code to execute.

    Returns:
        On completion: {'stdout': str, 'stderr': str, 'returncode': int}.
        On failure (e.g. timeout): {'error': str}.
    """
    temp_file = None
    try:
        with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
            f.write(code)
            temp_file = f.name

        # sys.executable is the running interpreter; a bare 'python' may not
        # exist on PATH (many systems ship only 'python3').
        result = subprocess.run(
            [sys.executable, temp_file],
            capture_output=True,
            text=True,
            timeout=30
        )

        return {
            'stdout': result.stdout,
            'stderr': result.stderr,
            'returncode': result.returncode
        }
    except Exception as e:
        return {'error': str(e)}
    finally:
        # Always remove the temp file; the original leaked it whenever
        # subprocess.run raised (e.g. TimeoutExpired).
        if temp_file is not None:
            try:
                os.unlink(temp_file)
            except OSError:
                pass

agent = CodeActAgent(
    code_execute_fn=code_execute_fn,
    name="data_analyst",
    description="Performs data analysis using Python",
    system_prompt="You can execute Python code to analyze data and solve problems.",
    llm=llm
)

result = await agent.run("Calculate the mean and standard deviation of [1, 2, 3, 4, 5]")

Multi-Agent Workflow

Orchestration system for coordinating multiple specialized agents with handoff capabilities and shared state management.

class AgentWorkflow:
    """
    Multi-agent workflow with handoff support.

    Coordinates several agents: execution starts at ``root_agent`` and
    agents may hand control to one another while sharing workflow state.

    Args:
        agents: List of participating agents
        initial_state: Initial workflow state
        root_agent: Starting agent name
        handoff_prompt: Prompt for handoff decisions
        output_cls: Structured output class
    """
    def __init__(
        self,
        agents,
        initial_state=None,  # the usage example passes a plain dict
        root_agent: str | None = None,  # agent *name*, not the agent object
        handoff_prompt=None,
        output_cls=None,
        **kwargs
    ): ...

    def run(self, user_msg=None, **kwargs):
        """Execute multi-agent workflow with automatic handoffs."""

    @classmethod
    def from_tools_or_functions(
        cls,
        tools_or_functions,
        llm,
        system_prompt=None,
        output_cls=None,
        **kwargs
    ):
        """Create workflow from tools, automatically selecting agent type."""

Usage Example:

from llama_index.core.agent import AgentWorkflow, ReActAgent, FunctionAgent

# Define specialized agents
calculator_agent = ReActAgent(
    name="calculator",
    description="Performs arithmetic operations",
    tools=[add_tool, subtract_tool, multiply_tool],
    llm=llm
)

research_agent = FunctionAgent(
    name="researcher", 
    description="Searches for information online",
    tools=[web_search_tool, wikipedia_tool],
    llm=llm,
    can_handoff_to=["calculator"]  # Can hand off to calculator
)

writer_agent = FunctionAgent(
    name="writer",
    description="Writes and formats content", 
    tools=[format_text_tool],
    llm=llm
)

# Create multi-agent workflow
workflow = AgentWorkflow(
    agents=[research_agent, calculator_agent, writer_agent],
    root_agent="researcher",  # Starting agent
    initial_state={"document_type": "report"}
)

# Execute with automatic handoffs
result = await workflow.run(
    "Research the GDP of top 5 countries, calculate their average, and write a summary"
)

Event System

Event-driven execution model for workflow communication and state management.

class Event:
    """Base event class for communication between workflow steps."""

class StartEvent(Event):
    """Workflow entry point - accepts arbitrary attributes."""
    # Fields are free-form — presumably populated from run(...) kwargs; verify.

class StopEvent(Event):
    """
    Workflow termination with result.
    
    Args:
        result: Final workflow result
    """
    result: Any  # final payload handed back to the caller

class AgentInput(Event):
    """
    LLM input to agent.
    
    Args:
        input: List of chat messages
        current_agent_name: Active agent identifier
    """
    input: List[ChatMessage]  # conversation messages sent to the LLM this step
    current_agent_name: str  # identifier of the agent handling this input

class AgentOutput(Event):
    """
    Agent response output.
    
    Args:
        response: Agent response message
        structured_response: Structured output data
        current_agent_name: Active agent identifier
        tool_calls: List of tool calls made
    """
    response: ChatMessage  # the agent's chat reply
    structured_response: Optional[Dict[str, Any]]  # presumably set when output_cls was given — verify
    current_agent_name: str  # which agent produced this output
    tool_calls: List[ToolSelection]  # tool invocations requested in this step

class ToolCall(Event):
    """
    Tool execution request.
    
    Args:
        tool_name: Name of tool to execute
        tool_kwargs: Tool arguments
        tool_id: Unique tool call identifier
    """
    tool_name: str  # which tool to invoke
    tool_kwargs: dict  # keyword arguments passed to the tool
    tool_id: str  # presumably correlates this request with its ToolCallResult — verify

class ToolCallResult(Event):
    """
    Tool execution result.
    
    Args:
        tool_name: Name of executed tool
        tool_output: Tool execution output
        return_direct: Whether to return result directly
    """
    tool_name: str  # which tool ran
    tool_output: ToolOutput  # wrapped output of the tool run
    return_direct: bool  # when True, the result is returned without further processing

Tool Integration

Comprehensive tool integration patterns for extending agent capabilities with custom functions and external services.

class FunctionTool:
    """Convert functions to agent tools."""
    
    @classmethod
    def from_defaults(
        cls,
        fn,
        name: str | None = None,
        description: str | None = None,
        return_direct: bool = False,
        **kwargs
    ):
        """
        Create tool from function.
        
        Args:
            fn: Function to convert to tool (a plain callable)
            name: Optional custom tool name (presumably defaults to fn's
                name — verify against the library)
            description: Optional custom description (presumably defaults
                to fn's docstring — verify)
            return_direct: Return result without further processing
        """

class ToolSpec:
    """Base class for multi-function tool specifications."""
    
    spec_functions: List[str]  # Method names to expose as individual tools
    
    def to_tool_list(self):
        """Convert this specification into a list of tools (one per name in spec_functions)."""

Tool Usage Example:

from llama_index.core.tools import FunctionTool, ToolSpec

# Simple function tool
def search_web(query: str, max_results: int = 5) -> str:
    """Search the web for information."""
    # NOTE: max_results is part of the tool signature shown to the LLM,
    # but this stub implementation does not use it.
    results = "Search results for " + query
    return results

search_tool = FunctionTool.from_defaults(
    fn=search_web,
    name="web_search",
    description="Search the web for current information"
)

# Tool specification for related functions
class WeatherToolSpec(ToolSpec):
    """Weather-related tools."""

    # Method names exposed to agents as individual tools.
    spec_functions = ["get_current_weather", "get_forecast"]

    def __init__(self, api_key: str):
        # Kept for a real API client; the stub responses below ignore it.
        self.api_key = api_key

    def get_current_weather(self, location: str) -> str:
        """Get current weather for location."""
        return "Current weather in " + location

    def get_forecast(self, location: str, days: int = 3) -> str:
        """Get weather forecast."""
        return "Forecast for {} for {} days".format(location, days)

# Use tools with agent
weather_spec = WeatherToolSpec(api_key="your-key")
agent = FunctionAgent(
    tools=[search_tool] + weather_spec.to_tool_list(),
    llm=llm
)

Memory and State Management

Persistent memory and state management for maintaining context across agent interactions and workflow executions.

class BaseMemory:
    """Base class for agent memory systems (see ChatMemoryBuffer, VectorMemory)."""
    
    def get_all(self):
        """Return all stored messages."""
    
    def put(self, message):
        """Store a single message in memory."""
    
    def reset(self):
        """Clear all stored messages."""

class ChatMemoryBuffer(BaseMemory):
    """
    Token-limited chat memory buffer.
    
    Args:
        token_limit: Maximum tokens to store (presumably oldest messages
            are dropped past this limit — verify)
        llm: LLM for token counting
    """
    def __init__(self, token_limit: int = 4000, llm=None): ...

class VectorMemory(BaseMemory):
    """
    Vector-based semantic memory.
    
    Args:
        vector_index: Vector index for storage
        retriever_kwargs: Arguments for retriever
    """
    def __init__(self, vector_index=None, retriever_kwargs: dict | None = None): ...

Memory Usage Example:

from llama_index.core.memory import ChatMemoryBuffer

# Create persistent memory
memory = ChatMemoryBuffer.from_defaults(
    token_limit=4000,
    llm=llm
)

# Use across multiple interactions
agent = ReActAgent(tools=tools, llm=llm)

# First conversation
result1 = await agent.run(
    user_msg="My name is Alice and I like Python programming",
    memory=memory
)

# Second conversation - memory persists
result2 = await agent.run(
    user_msg="What is my name and what do I like?",
    memory=memory  # Same memory instance
)

Streaming and Real-time Processing

Event streaming capabilities for real-time monitoring of agent execution and intermediate results.

class AgentStream(Event):
    """
    Streaming response from agent.
    
    Args:
        delta: New content chunk
        response: Full response so far
        current_agent_name: Active agent
        tool_calls: Tool calls in progress
    """
    delta: str  # newest content chunk only
    response: str  # accumulated response text so far
    current_agent_name: str  # which agent is emitting the stream
    tool_calls: List[ToolSelection]  # tool calls in progress

class WorkflowHandler:
    """
    Handler for workflow execution with streaming support.

    Returned by agent/workflow ``run``; awaiting the handler itself yields
    the final result (see the streaming usage example).
    """
    
    def stream_events(self):
        """Async-iterate events (AgentStream, ToolCall, ...) as execution proceeds."""
    
    async def __aenter__(self):
        """Async context manager entry."""
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit."""

Streaming Example:

# Enable streaming
agent = FunctionAgent(
    tools=tools,
    llm=llm,
    streaming=True
)

# Execute with streaming
handler = agent.run("Analyze this complex dataset...")

# Stream events in real-time
async for event in handler.stream_events():
    if isinstance(event, AgentStream):
        print(f"Agent thinking: {event.delta}")
    elif isinstance(event, ToolCall):
        print(f"Using tool: {event.tool_name}")
    elif isinstance(event, ToolCallResult):
        print(f"Tool result: {event.tool_output.content}")

# Get final result
final_result = await handler
print(f"Final answer: {final_result.response.content}")

Error Handling and Retry

Built-in error handling and retry mechanisms for robust agent execution with automatic recovery from parsing and execution errors.

class WorkflowRuntimeError(Exception):
    """
    Exception raised during workflow execution.

    Catch this around ``agent.run(...)`` / ``workflow.run(...)`` to handle
    agent failures (see the error-handling example below).
    """

# Agent automatically retries on parsing errors
# Maximum iterations prevent infinite loops
agent = ReActAgent(tools=tools, llm=llm)

try:
    result = await agent.run(
        user_msg="Complex multi-step task",
        max_iterations=50  # Prevent infinite reasoning loops
    )
except WorkflowRuntimeError as e:
    print(f"Agent execution failed: {e}")

Install with Tessl CLI

npx tessl i tessl/pypi-llama-index

docs

agents-workflows.md

data-indexing.md

document-processing.md

index.md

llm-integration.md

prompts.md

query-processing.md

response-synthesis.md

retrievers.md

storage-settings.md

tile.json