LLM framework to build customizable, production-ready LLM applications with pipelines connecting models, vector DBs, and data processors.
—
Interactive LLM agents with tool usage, memory management, and conversational capabilities for complex reasoning tasks. Agents use large language models to reason through multi-step problems by selecting and using appropriate tools, maintaining conversation memory, and generating step-by-step solutions.
from haystack.agents import Agent, Tool, AgentStep
from haystack.agents.memory import Memory, ConversationMemory, ConversationSummaryMemory, NoMemory
from haystack.nodes import PromptNode
The main Agent class that uses tools and memory to answer complex queries through multi-step reasoning.
class Agent:
def __init__(
self,
prompt_node: PromptNode,
prompt_template: Optional[Union[str, PromptTemplate]] = None,
tools_manager: Optional[ToolsManager] = None,
memory: Optional[Memory] = None,
prompt_parameters_resolver: Optional[Callable] = None,
max_steps: int = 8,
final_answer_pattern: str = r"Final Answer\s*:\s*(.*)",
streaming: bool = True,
):
"""
Creates an Agent instance for multi-step reasoning and tool usage.
Args:
prompt_node: PromptNode for decision making and tool selection
prompt_template: Custom template for reasoning process
tools_manager: Manager for available tools
memory: Memory system for conversation context
prompt_parameters_resolver: Function to resolve prompt parameters
max_steps: Maximum reasoning steps before stopping
final_answer_pattern: Regex pattern to extract final answers
streaming: Whether to stream LLM responses
"""
def run(self, query: str, params: Optional[dict] = None) -> AgentStep:
"""Execute multi-step reasoning to answer a query."""
def add_tool(self, tool: Tool) -> None:
"""Add a tool to the agent's available tools."""Represents a pipeline or node that an Agent can use to perform specific tasks.
class Tool:
def __init__(
self,
name: str,
pipeline_or_node: Union[BaseComponent, Pipeline, Callable[[Any], str]],
description: str,
output_variable: str = "results",
logging_color: Color = Color.YELLOW,
):
"""
Create a tool that an Agent can use.
Args:
name: Short name for the tool (letters, digits, underscores only)
pipeline_or_node: Pipeline, node, or callable to execute
description: Description for when to use this tool
output_variable: Variable name for tool output
logging_color: Color for logging output
"""
def run(self, tool_input: str, params: Optional[dict] = None) -> str:
"""Execute the tool with given input."""Represents a single step in the Agent's reasoning process.
class AgentStep:
    def __init__(
        self,
        current_step: int = 1,
        max_steps: int = 8,
        final_answer_pattern: str = r"Final Answer\s*:\s*(.*)",
        prompt_node_response: str = "",
    ):
        """
        One iteration of the Agent's reasoning loop.

        Args:
            current_step: 1-based index of this step.
            max_steps: Maximum number of steps the agent may take.
            final_answer_pattern: Regex that signals a final answer in the LLM response.
            prompt_node_response: Raw LLM output produced during this step.
        """

    @property
    def is_last(self) -> bool:
        """Whether this step terminates the reasoning loop."""

    def extract_final_answer(self, text: str) -> Optional[str]:
        """Return the final answer matched in ``text`` by the pattern, if any."""
class Memory:
    def load(self, keys: Optional[List[str]] = None, **kwargs) -> str:
        """Return stored memory content, optionally restricted to ``keys``."""

    def save(self, data: Dict[str, Any]) -> None:
        """Persist ``data`` into memory."""

    def clear(self) -> None:
        """Discard all stored memory content."""
# Simple memory that stores conversation history as a list of messages.
class ConversationMemory(Memory):
    def __init__(self, window_size: Optional[int] = None):
        """
        Conversation memory with an optional sliding window.

        Args:
            window_size: Maximum number of recent conversation turns to keep;
                None keeps the full history.
        """
# Memory that summarizes conversation history to stay within token limits.
class ConversationSummaryMemory(Memory):
    def __init__(
        self,
        prompt_node: PromptNode,
        summary_frequency: int = 3,
        prompt_template: Optional[str] = None,
    ):
        """
        Memory that periodically condenses the conversation into a summary.

        Args:
            prompt_node: LLM node used to generate the summaries.
            summary_frequency: Number of conversation turns between summarizations.
            prompt_template: Optional custom template for the summarization prompt.
        """
# Memory implementation that stores nothing (stateless agent).
class NoMemory(Memory):
    def __init__(self):
        """Create a no-op memory: nothing is stored between turns."""
from haystack import Pipeline, Document
from haystack.agents import Agent, Tool
from haystack.nodes import PromptNode, BM25Retriever
from haystack.document_stores import InMemoryDocumentStore

# Build an in-memory document store with a couple of sample documents.
doc_store = InMemoryDocumentStore()
doc_store.write_documents([
    Document(content="Python is a programming language."),
    Document(content="Machine learning uses algorithms to find patterns."),
])
retriever = BM25Retriever(document_store=doc_store)

# Expose the retriever to the agent as a document-search tool.
search_tool = Tool(
    name="DocumentSearch",
    pipeline_or_node=retriever,
    description="Useful for finding information about programming and ML concepts",
)

# Agent backed by an OpenAI chat model, capped at 5 reasoning steps.
agent = Agent(
    prompt_node=PromptNode(model_name_or_path="gpt-3.5-turbo", api_key="your-key"),
    max_steps=5,
)
agent.add_tool(search_tool)

# Ask a question; the agent decides when to invoke the search tool.
result = agent.run("What is Python and how is it used in machine learning?")
print(result.final_answer)
from haystack.agents import Agent, Tool
from haystack.agents.memory import ConversationMemory
from haystack.nodes import PromptNode

# Agent that remembers the last 10 conversation turns.
agent = Agent(
    prompt_node=PromptNode(model_name_or_path="gpt-3.5-turbo", api_key="your-key"),
    memory=ConversationMemory(window_size=10),
    max_steps=6,
)

# Simple calculator tool backed by a plain callable.
# WARNING: eval() executes arbitrary code -- never pass it untrusted input.
# In production, use ast.literal_eval or a dedicated math-expression parser.
def calculator(expression: str) -> str:
    try:
        return str(eval(expression))
    except Exception:  # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
        return "Invalid expression"

calc_tool = Tool(
    name="Calculator",
    pipeline_or_node=calculator,
    description="Useful for mathematical calculations",
)
agent.add_tool(calc_tool)

# Multi-turn conversation: memory lets turn 2 refer back to turn 1's result.
result1 = agent.run("What is 15 * 7?")
print(result1.final_answer)  # "105"
result2 = agent.run("Add 23 to that result")
print(result2.final_answer)  # "128" (remembers previous calculation)
from haystack.agents import Agent, Tool
from haystack.pipelines import ExtractiveQAPipeline
from haystack.nodes import PromptNode, FARMReader, BM25Retriever

# Extractive QA pipeline (reader + retriever) exposed as one tool.
qa_pipeline = ExtractiveQAPipeline(
    reader=FARMReader("deepset/roberta-base-squad2"),
    retriever=BM25Retriever(document_store=doc_store),
)
qa_tool = Tool(
    name="QuestionAnswering",
    pipeline_or_node=qa_pipeline,
    description="Useful for answering specific questions about documents",
)

# Generative LLM exposed as a second tool.
generator = PromptNode(model_name_or_path="gpt-3.5-turbo", api_key="your-key")
gen_tool = Tool(
    name="TextGenerator",
    pipeline_or_node=generator,
    description="Useful for generating creative text or explanations",
)

# Agent that can chain both tools to satisfy a single query.
agent = Agent(
    prompt_node=PromptNode(model_name_or_path="gpt-3.5-turbo", api_key="your-key"),
    max_steps=8,
)
agent.add_tool(qa_tool)
agent.add_tool(gen_tool)

# A query that needs retrieval first, then generation.
result = agent.run("Find information about neural networks and then write a simple explanation for beginners")
print(result.final_answer)
from haystack.agents import Agent, Tool
from haystack.agents.memory import ConversationSummaryMemory
from haystack.nodes import PromptNode

# Agent with summarizing memory and streamed output.
prompt_node = PromptNode(model_name_or_path="gpt-3.5-turbo", api_key="your-key")
memory = ConversationSummaryMemory(prompt_node=prompt_node, summary_frequency=5)
agent = Agent(
    prompt_node=prompt_node,
    memory=memory,
    streaming=True,  # stream tokens as they are produced
    max_steps=10,
)

# Register a web-search tool; the agent streams its reasoning to stdout.
web_search_tool = Tool(
    name="WebSearch",
    pipeline_or_node=web_search_pipeline,  # assume this pipeline exists
    description="Search the web for current information",
)
agent.add_tool(web_search_tool)
result = agent.run("What are the latest developments in quantum computing?")
from typing import Optional, Union, Dict, Any, List, Callable
from enum import Enum
class Color(Enum):
    """Terminal colors available for logging tool outputs."""

    BLACK = "black"
    RED = "red"
    GREEN = "green"
    YELLOW = "yellow"
    BLUE = "blue"
    MAGENTA = "magenta"
    CYAN = "cyan"
    WHITE = "white"
class ToolsManager:
"""Manager for organizing and accessing agent tools."""
def __init__(self):
self.tools: Dict[str, Tool] = {}
def add_tool(self, tool: Tool) -> None:
"""Add a tool to the manager."""
def get_tool_names(self) -> List[str]:
"""Get names of all registered tools."""
def get_tool_names_with_descriptions(self) -> str:
"""Get formatted string of tool names and descriptions."""Install with Tessl CLI
npx tessl i tessl/pypi-farm-haystack