Interface between LLMs and your data
—
Agent implementations supporting ReAct reasoning, function calling, and workflow orchestration with comprehensive tool integration. The agent framework enables autonomous task execution through iterative reasoning, tool usage, and multi-step problem solving.
Foundation interfaces for agent implementations with standardized interaction patterns and execution flows.
class BaseWorkflowAgent:
"""
Base interface for workflow-based agent implementations.
Workflow agents coordinate complex multi-step processes using
structured workflows and event-driven execution patterns.
"""
def __init__(self, **kwargs): ...
def run(self, input_data: Any, **kwargs) -> Any:
"""
Execute agent workflow with input data.
Parameters:
- input_data: Any, input data for workflow execution
- **kwargs: additional execution parameters
Returns:
- Any, workflow execution result
"""
def stream_run(self, input_data: Any, **kwargs) -> Iterator[Any]:
"""
Stream workflow execution results.
Parameters:
- input_data: Any, input data for workflow execution
- **kwargs: additional execution parameters
Returns:
- Iterator[Any], streaming workflow results
"""ReAct (Reason + Act) agent that combines reasoning and action in iterative cycles for complex problem solving.
class ReActAgent:
"""
ReAct (Reason + Act) agent implementation for iterative reasoning and action.
The ReAct pattern combines reasoning traces and task-specific actions,
allowing the agent to dynamically plan, act, and observe in cycles.
Parameters:
- tools: List[BaseTool], available tools for the agent
- llm: LLM, language model for reasoning and planning
- memory: Optional[BaseMemory], memory system for conversation history
- max_iterations: int, maximum number of reasoning iterations
- react_chat_formatter: Optional[ReActChatFormatter], formatter for ReAct messages
- output_parser: Optional[ReActOutputParser], parser for ReAct output
- callback_manager: Optional[CallbackManager], callback management system
- verbose: bool, whether to enable verbose logging
"""
def __init__(
self,
tools: List[BaseTool],
llm: LLM,
memory: Optional[BaseMemory] = None,
max_iterations: int = 10,
react_chat_formatter: Optional[ReActChatFormatter] = None,
output_parser: Optional[ReActOutputParser] = None,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = False,
**kwargs
): ...
def reset(self) -> None:
"""Reset agent state and clear memory."""
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Optional[Union[str, dict]] = None,
**kwargs
) -> AgentChatResponse:
"""
Execute chat interaction with ReAct reasoning.
Parameters:
- message: str, user message or query
- chat_history: Optional[List[ChatMessage]], conversation history
- tool_choice: Optional[Union[str, dict]], tool selection preference
- **kwargs: additional chat parameters
Returns:
- AgentChatResponse, agent response with reasoning trace
"""
def stream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
tool_choice: Optional[Union[str, dict]] = None,
**kwargs
) -> StreamingAgentChatResponse:
"""
Stream chat interaction with ReAct reasoning.
Parameters:
- message: str, user message or query
- chat_history: Optional[List[ChatMessage]], conversation history
- tool_choice: Optional[Union[str, dict]], tool selection preference
- **kwargs: additional chat parameters
Returns:
- StreamingAgentChatResponse, streaming agent response
"""Agent implementation optimized for function calling and tool execution with structured reasoning.
class FunctionAgent:
"""
Function-calling agent optimized for structured tool usage and execution.
Function agents excel at using well-defined tools and APIs to accomplish
tasks through structured function calls and parameter passing.
Parameters:
- tools: List[BaseTool], available tools for function calling
- llm: LLM, language model supporting function calling
- system_prompt: Optional[str], system prompt for agent behavior
- max_function_calls: int, maximum function calls per interaction
- callback_manager: Optional[CallbackManager], callback management
- verbose: bool, whether to enable verbose logging
"""
def __init__(
self,
tools: List[BaseTool],
llm: LLM,
system_prompt: Optional[str] = None,
max_function_calls: int = 5,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = False,
**kwargs
): ...
def chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
**kwargs
) -> AgentChatResponse:
"""Execute function-calling chat interaction."""
def stream_chat(
self,
message: str,
chat_history: Optional[List[ChatMessage]] = None,
**kwargs
) -> StreamingAgentChatResponse:
"""Stream function-calling chat interaction."""Specialized agent for code generation, execution, and iterative development tasks.
class CodeActAgent:
"""
Code action agent for code generation, execution, and debugging.
CodeAct agents can write, execute, and iterate on code to solve
programming tasks and data analysis problems.
Parameters:
- tools: List[BaseTool], tools including code execution environment
- llm: LLM, language model for code generation
- system_prompt: Optional[str], system prompt for coding behavior
- max_iterations: int, maximum code-action iterations
- callback_manager: Optional[CallbackManager], callback management
- verbose: bool, whether to enable verbose logging
"""
def __init__(
self,
tools: List[BaseTool],
llm: LLM,
system_prompt: Optional[str] = None,
max_iterations: int = 10,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = False,
**kwargs
): ...Multi-agent workflow orchestration for complex, distributed problem solving.
class AgentWorkflow:
"""
Multi-agent workflow system for orchestrating complex agent interactions.
Agent workflows coordinate multiple agents, manage state transitions,
and handle complex multi-step processes requiring different agent capabilities.
Parameters:
- agents: Dict[str, BaseAgent], named agents in the workflow
- workflow_definition: dict, workflow structure and execution flow
- state_manager: Optional[StateManager], state management system
- callback_manager: Optional[CallbackManager], callback management
"""
def __init__(
self,
agents: Dict[str, BaseAgent],
workflow_definition: dict,
state_manager: Optional[StateManager] = None,
callback_manager: Optional[CallbackManager] = None,
**kwargs
): ...
def run(
self,
initial_input: Any,
**kwargs
) -> Any:
"""
Execute multi-agent workflow.
Parameters:
- initial_input: Any, initial input to workflow
- **kwargs: additional workflow parameters
Returns:
- Any, workflow execution result
"""Specialized components for ReAct pattern implementation and output processing.
class ReActOutputParser:
    """
    Parses raw ReAct LLM output into structured execution components.

    Extracts the thought, action, and observation pieces that drive each
    cycle of the ReAct loop.

    Parameters:
    - output_format: str, expected output format (json, text, structured)
    """
    def __init__(self, output_format: str = "text"): ...
    def parse(self, output: str) -> Dict[str, Any]:
        """
        Split raw ReAct output into its structured parts.

        Parameters:
        - output: str, raw ReAct output from LLM

        Returns:
        - Dict[str, Any], parsed components (thought, action, observation)
        """
    def format_action(self, action: str, action_input: str) -> str:
        """
        Render an action and its input as a ReAct action string.

        Parameters:
        - action: str, action name
        - action_input: str, action parameters

        Returns:
        - str, formatted action string
        """
class ReActChatFormatter:
"""
Formatter for ReAct chat messages and conversation structure.
Formats chat messages to support ReAct reasoning pattern with
proper structure for thought, action, and observation cycles.
Parameters:
- system_prompt: Optional[str], system prompt for ReAct behavior
- context_separator: str, separator for context sections
"""
def __init__(
self,
system_prompt: Optional[str] = None,
context_separator: str = "\\n\\n"
): ...
def format(
self,
tools: List[BaseTool],
chat_history: List[ChatMessage],
current_reasoning: Optional[str] = None
) -> List[ChatMessage]:
"""
Format messages for ReAct interaction.
Parameters:
- tools: List[BaseTool], available tools
- chat_history: List[ChatMessage], conversation history
- current_reasoning: Optional[str], current reasoning trace
Returns:
- List[ChatMessage], formatted messages for ReAct
"""Response structures for agent interactions with comprehensive metadata and source tracking.
class AgentChatResponse:
    """
    Result of an agent chat turn, with reasoning trace and metadata.

    Parameters:
    - response: str, main response text
    - sources: Optional[List[ToolOutput]], tools and sources used
    - source_nodes: Optional[List[NodeWithScore]], retrieved source nodes
    - reasoning: Optional[str], agent reasoning trace
    - tool_calls: Optional[List[ToolCall]], tool calls made by agent
    """
    def __init__(
        self,
        response: str,
        sources: Optional[List[ToolOutput]] = None,
        source_nodes: Optional[List[NodeWithScore]] = None,
        reasoning: Optional[str] = None,
        tool_calls: Optional[List[ToolCall]] = None,
        **kwargs
    ): ...
    @property
    def response_txt(self) -> str:
        """Plain-text body of the response."""
    def __str__(self) -> str:
        """Human-readable rendering of the response."""
class StreamingAgentChatResponse:
"""
Streaming response from agent chat interaction.
Parameters:
- response_gen: Iterator[str], response text generator
- sources: Optional[List[ToolOutput]], tools and sources used
- source_nodes: Optional[List[NodeWithScore]], retrieved source nodes
- reasoning: Optional[str], agent reasoning trace
"""
def __init__(
self,
response_gen: Iterator[str],
sources: Optional[List[ToolOutput]] = None,
source_nodes: Optional[List[NodeWithScore]] = None,
reasoning: Optional[str] = None,
**kwargs
): ...
def response_gen(self) -> Iterator[str]:
"""Get response generator."""Event-driven system for agent communication and workflow coordination.
class AgentInput:
    """
    Workflow event carrying input destined for an agent.

    Parameters:
    - input_data: Any, payload for agent processing
    - metadata: Optional[dict], additional input metadata
    """
    def __init__(
        self,
        input_data: Any,
        metadata: Optional[dict] = None
    ): ...
class AgentOutput:
    """
    Workflow event carrying the result of agent processing.

    Parameters:
    - output_data: Any, payload produced by the agent
    - metadata: Optional[dict], additional output metadata
    - tool_calls: Optional[List[ToolCall]], tool calls made
    """
    def __init__(
        self,
        output_data: Any,
        metadata: Optional[dict] = None,
        tool_calls: Optional[List[ToolCall]] = None
    ): ...
class AgentStream:
    """
    Workflow event carrying continuous streaming output.

    Parameters:
    - stream_data: Iterator[Any], streaming data
    - metadata: Optional[dict], stream metadata
    """
    def __init__(
        self,
        stream_data: Iterator[Any],
        metadata: Optional[dict] = None
    ): ...
class ToolCall:
    """
    Event recording a single tool invocation by an agent.

    Parameters:
    - tool_name: str, name of the tool called
    - tool_input: dict, parameters passed to tool
    - tool_id: Optional[str], unique identifier for tool call
    """
    def __init__(
        self,
        tool_name: str,
        tool_input: dict,
        tool_id: Optional[str] = None
    ): ...
class ToolCallResult:
"""
Result of tool call execution.
Parameters:
- tool_call: ToolCall, original tool call
- result: Any, tool execution result
- error: Optional[str], error message if tool call failed
"""
def __init__(
self,
tool_call: ToolCall,
result: Any,
error: Optional[str] = None
): ...Foundation interface for all tool implementations with standardized execution methods.
class BaseTool:
    """
    Base interface every tool implementation conforms to.

    A tool packages a capability — anything from a plain function to a
    full API integration — so agents can invoke it to accomplish tasks.

    Parameters:
    - metadata: ToolMetadata, tool description and parameter schema
    - fn: Optional[Callable], function implementation for the tool
    """
    def __init__(
        self,
        metadata: ToolMetadata,
        fn: Optional[Callable] = None,
        **kwargs
    ): ...
    def call(self, input: Any, **kwargs) -> ToolOutput:
        """
        Run the tool on the given input.

        Parameters:
        - input: Any, input data for tool execution
        - **kwargs: additional tool parameters

        Returns:
        - ToolOutput, tool execution result
        """
    def __call__(self, input: Any, **kwargs) -> ToolOutput:
        """Invoke the tool directly as a callable."""
    @property
    def metadata(self) -> ToolMetadata:
        """Tool metadata and description."""
class AsyncBaseTool(BaseTool):
"""
Base interface for asynchronous tool implementations.
"""
async def acall(self, input: Any, **kwargs) -> ToolOutput:
"""
Asynchronously execute tool with input parameters.
Parameters:
- input: Any, input data for tool execution
- **kwargs: additional tool parameters
Returns:
- ToolOutput, tool execution result
"""Tools that wrap Python functions for use by agents with automatic parameter handling.
class FunctionTool(BaseTool):
"""
Tool wrapper for Python functions with automatic parameter handling.
Parameters:
- fn: Callable, Python function to wrap as tool
- metadata: Optional[ToolMetadata], tool metadata (auto-generated if None)
- async_fn: Optional[Callable], async version of function
"""
def __init__(
self,
fn: Callable,
metadata: Optional[ToolMetadata] = None,
async_fn: Optional[Callable] = None,
**kwargs
): ...
@classmethod
def from_defaults(
cls,
fn: Callable,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
async_fn: Optional[Callable] = None,
**kwargs
) -> "FunctionTool":
"""
Create FunctionTool with default metadata generation.
Parameters:
- fn: Callable, function to wrap
- name: Optional[str], tool name (defaults to function name)
- description: Optional[str], tool description (from docstring)
- return_direct: bool, whether to return result directly
- async_fn: Optional[Callable], async version of function
Returns:
- FunctionTool, configured function tool
"""Tools that integrate query engines for information retrieval and question answering.
class QueryEngineTool(BaseTool):
"""
Tool wrapper for query engines to enable agent-based information retrieval.
Parameters:
- query_engine: BaseQueryEngine, query engine for information retrieval
- metadata: ToolMetadata, tool description and usage information
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
**kwargs
): ...
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> "QueryEngineTool":
"""Create QueryEngineTool with default metadata."""Tools that wrap retrievers for agent-based information gathering and context retrieval.
class RetrieverTool(BaseTool):
"""
Tool wrapper for retrievers to enable agent-based information gathering.
Parameters:
- retriever: BaseRetriever, retriever for information gathering
- metadata: ToolMetadata, tool description and usage information
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
**kwargs
): ...
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
name: Optional[str] = None,
description: Optional[str] = None,
**kwargs
) -> "RetrieverTool":
"""Create RetrieverTool with default metadata."""Advanced tools for query planning and multi-step information retrieval strategies.
class QueryPlanTool(BaseTool):
"""
Tool for query planning and multi-step information retrieval.
Query plan tools break down complex queries into manageable steps
and coordinate multiple retrieval operations.
Parameters:
- query_engine: BaseQueryEngine, query engine for execution
- metadata: ToolMetadata, tool metadata and description
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
**kwargs
): ...Metadata structures for describing tool capabilities, parameters, and usage patterns.
class ToolMetadata:
    """
    Describes a tool's capabilities and calling interface.

    Parameters:
    - name: str, tool name identifier
    - description: str, human-readable tool description
    - fn_schema: Optional[Type[BaseModel]], Pydantic schema for parameters
    - return_direct: bool, whether tool output should be returned directly
    """
    def __init__(
        self,
        name: str,
        description: str,
        fn_schema: Optional[Type[BaseModel]] = None,
        return_direct: bool = False,
        **kwargs
    ): ...
    def to_openai_tool(self) -> dict:
        """Serialize this metadata into the OpenAI tool format."""
    def get_parameters_dict(self) -> dict:
        """Return the parameter schema as a plain dictionary."""
class ToolOutput:
"""
Output from tool execution with content and metadata.
Parameters:
- content: str, main output content
- tool_name: str, name of tool that generated output
- raw_input: Optional[dict], raw input parameters
- raw_output: Optional[Any], raw output data
- is_error: bool, whether output represents an error
"""
def __init__(
self,
content: str,
tool_name: str,
raw_input: Optional[dict] = None,
raw_output: Optional[Any] = None,
is_error: bool = False,
**kwargs
): ...
def __str__(self) -> str:
"""String representation of tool output."""Utilities for tool selection, execution, and result processing in agent workflows.
class ToolSelection:
    """
    A chosen tool plus the parameters to invoke it with.

    Parameters:
    - tool_id: str, identifier of selected tool
    - tool_name: str, name of selected tool
    - tool_kwargs: dict, parameters for tool execution
    """
    def __init__(
        self,
        tool_id: str,
        tool_name: str,
        tool_kwargs: dict,
        **kwargs
    ): ...
def call_tool_with_selection(
    tools: List[BaseTool],
    tool_selection: ToolSelection,
    **kwargs
) -> ToolOutput:
    """
    Look up the selected tool and execute it with the chosen parameters.

    Parameters:
    - tools: List[BaseTool], available tools
    - tool_selection: ToolSelection, selected tool and parameters
    - **kwargs: additional execution parameters

    Returns:
    - ToolOutput, tool execution result
    """
async def acall_tool_with_selection(
    tools: List[BaseTool],
    tool_selection: ToolSelection,
    **kwargs
) -> ToolOutput:
    """Asynchronous counterpart of call_tool_with_selection."""
def adapt_to_async_tool(tool: BaseTool) -> AsyncBaseTool:
    """
    Wrap a synchronous tool in the asynchronous tool interface.

    Parameters:
    - tool: BaseTool, synchronous tool to convert

    Returns:
    - AsyncBaseTool, asynchronous tool wrapper
    """
def download_tool(tool_name: str, **kwargs) -> BaseTool:
    """
    Fetch a tool implementation from LlamaHub.

    Parameters:
    - tool_name: str, name of tool to download
    - **kwargs: additional download parameters

    Returns:
    - BaseTool, downloaded and configured tool
    """

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.core.llms import MockLLM
def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    product = a * b
    return product
def add(a: int, b: int) -> int:
    """Add two integers."""
    total = a + b
    return total
# Wrap the plain functions as agent tools
multiply_tool = FunctionTool.from_defaults(fn=multiply)
add_tool = FunctionTool.from_defaults(fn=add)

# Initialize ReAct agent
llm = MockLLM()
agent = ReActAgent.from_tools(
    tools=[multiply_tool, add_tool],
    llm=llm,
    verbose=True
)

# Use agent for multi-step reasoning
response = agent.chat("What is (3 * 4) + 5?")
print(f"Agent response: {response.response}")
print(f"Reasoning: {response.reasoning}")

from llama_index.core.tools import QueryEngineTool
from llama_index.core import VectorStoreIndex, Document

# Create knowledge base
documents = [
    Document(text="Machine learning is a subset of artificial intelligence that focuses on algorithms."),
    Document(text="Deep learning uses neural networks with multiple layers for complex pattern recognition."),
    Document(text="Natural language processing enables computers to understand and generate human language.")
]
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()

# Create query engine tool
knowledge_tool = QueryEngineTool.from_defaults(
    query_engine=query_engine,
    name="knowledge_search",
    description="Search the knowledge base for information about AI and machine learning topics."
)

# Add to agent
agent = ReActAgent.from_tools(
    tools=[knowledge_tool, multiply_tool, add_tool],
    llm=llm,
    verbose=True
)

# Use agent with knowledge retrieval
response = agent.chat("What is the relationship between machine learning and AI?")
print(f"Knowledge-based response: {response.response}")

import json
from typing import Dict, Any
def analyze_data(data: str, analysis_type: str = "summary") -> str:
    """
    Analyze JSON data and return insights.

    Args:
        data: JSON string containing data to analyze
        analysis_type: Type of analysis ('summary', 'statistics', 'trends')

    Returns:
        Human-readable analysis result, or an error message for invalid JSON.
    """
    try:
        parsed_data = json.loads(data)
    except json.JSONDecodeError:
        return "Invalid JSON data provided"
    if analysis_type == "summary":
        keys = list(parsed_data.keys()) if isinstance(parsed_data, dict) else "list format"
        return f"Data contains {len(parsed_data)} items with keys: {keys}"
    if analysis_type == "statistics":
        # `parsed_data and ...`: an empty list previously reached the average
        # computation and raised ZeroDivisionError; now it falls through.
        if (
            isinstance(parsed_data, list)
            and parsed_data
            and all(isinstance(x, (int, float)) for x in parsed_data)
        ):
            avg = sum(parsed_data) / len(parsed_data)
            return f"Average: {avg:.2f}, Min: {min(parsed_data)}, Max: {max(parsed_data)}"
        return "Statistics not available for this data type"
    return f"Analysis type '{analysis_type}' completed"
# Register the analyzer as a custom tool
analysis_tool = FunctionTool.from_defaults(
    fn=analyze_data,
    name="data_analyzer",
    description="Analyze JSON data and provide insights including summaries and statistics"
)

# Use in agent
data_agent = ReActAgent.from_tools(
    tools=[analysis_tool],
    llm=llm,
    verbose=True
)

# Analyze data
sample_data = json.dumps([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
response = data_agent.chat(f"Please analyze this data: {sample_data}")
print(response.response)

from llama_index.core.tools import RetrieverTool
# Wrap the index retriever as an agent tool
retriever = index.as_retriever(similarity_top_k=3)
retriever_tool = RetrieverTool.from_defaults(
    retriever=retriever,
    name="document_retriever",
    description="Retrieve relevant documents from the knowledge base"
)
def calculate_score(factors: list, weights: list = None) -> float:
    """
    Calculate a weighted average score from factors.

    Parameters:
    - factors: list, numeric factor values
    - weights: list, optional per-factor weights (defaults to equal weights)

    Returns:
    - float, weighted average; 0.0 for empty input, mismatched lengths,
      or an all-zero weight vector
    """
    if not factors:
        return 0.0  # empty input previously raised ZeroDivisionError
    if weights is None:
        weights = [1.0] * len(factors)
    if len(factors) != len(weights):
        return 0.0
    total_weight = sum(weights)
    if total_weight == 0:
        return 0.0  # all-zero weights previously raised ZeroDivisionError
    total = sum(f * w for f, w in zip(factors, weights))
    return total / total_weight
score_tool = FunctionTool.from_defaults(fn=calculate_score)

# Comprehensive agent with multiple tools
comprehensive_agent = ReActAgent.from_tools(
    tools=[
        knowledge_tool,
        retriever_tool,
        analysis_tool,
        score_tool,
        multiply_tool,
        add_tool
    ],
    llm=llm,
    max_iterations=15,
    verbose=True
)

# Complex multi-step task
response = comprehensive_agent.chat(
    "First, search for information about machine learning. "
    "Then calculate a relevance score using factors [0.8, 0.9, 0.7] with equal weights. "
    "Finally, multiply the score by 100 to get a percentage."
)
print(f"Multi-step result: {response.response}")
print(f"Tools used: {[source.tool_name for source in response.sources or []]}")

# Stream agent responses for real-time interaction
def stream_agent_response():
    """Stream an agent answer chunk-by-chunk to stdout."""
    streaming_response = agent.stream_chat("Explain the process of neural network training step by step")
    print("Streaming agent response:")
    for chunk in streaming_response.response_gen:
        print(chunk, end="", flush=True)
    # Fixed over-escaped newlines ("\\n\\n" -> "\n\n") left by extraction.
    print("\n\nStreaming complete.")

# stream_agent_response()

from llama_index.core.memory import ChatMemoryBuffer
# Create agent with memory
memory = ChatMemoryBuffer.from_defaults(token_limit=2000)
memory_agent = ReActAgent.from_tools(
    tools=[knowledge_tool, multiply_tool],
    llm=llm,
    memory=memory,
    verbose=True
)

# Multi-turn conversation with context.
# Fixed over-escaped newlines ("\\n" -> "\n") left by extraction.
print("=== Turn 1 ===")
response1 = memory_agent.chat("What is machine learning?")
print(response1.response)
print("\n=== Turn 2 ===")
response2 = memory_agent.chat("Can you give me a specific example?")
print(response2.response)
print("\n=== Turn 3 ===")
response3 = memory_agent.chat("How does it relate to what we discussed earlier?")
print(response3.response)

from llama_index.core.agent import FunctionAgent
# Function calling agent
function_agent = FunctionAgent.from_tools(
    tools=[multiply_tool, add_tool, analysis_tool],
    llm=llm,  # Should be function-calling capable LLM
    system_prompt="You are a helpful assistant that uses functions to solve problems step by step.",
    max_function_calls=10,
    verbose=True
)
response = function_agent.chat("Calculate (5 * 3) + (2 * 4), then analyze the result")
print(f"Function agent response: {response.response}")

def risky_operation(value: int) -> str:
    """Operation that might fail."""
    if value < 0:
        raise ValueError("Value must be non-negative")
    return f"Success: processed value {value}"

risky_tool = FunctionTool.from_defaults(fn=risky_operation)
error_handling_agent = ReActAgent.from_tools(
    tools=[risky_tool, multiply_tool],
    llm=llm,
    verbose=True
)

# Test error handling
response = error_handling_agent.chat("Please process the value -5 using the risky operation")
print(f"Error handling result: {response.response}")

# Agent configuration
class AgentType(str, Enum):
REACT = "react"
FUNCTION = "function"
CODE_ACT = "code_act"
WORKFLOW = "workflow"
# Tool configuration
DEFAULT_MAX_FUNCTION_CALLS = 5
DEFAULT_MAX_ITERATIONS = 10
DEFAULT_VERBOSE = False
# Response streaming
StreamingAgentChatResponse = Iterator[str]
# Memory types
BaseMemory = Any # Base memory interface
# Agent state management
class AgentState:
"""Agent state for workflow coordination."""
passInstall with Tessl CLI
npx tessl i tessl/pypi-llama-index-core