Library with high-level APIs for creating and executing LangGraph agents and tools.
Tool execution functionality for processing tool calls from AI messages with parallel execution, comprehensive error handling, and state injection capabilities.
A node that runs tools called in the last AIMessage. Supports parallel tool execution, multiple input formats, and comprehensive error handling strategies.
class ToolNode(RunnableCallable):
def __init__(
self,
tools: Sequence[Union[BaseTool, Callable]],
*,
name: str = "tools",
tags: Optional[list[str]] = None,
handle_tool_errors: Union[
bool, str, Callable[..., str], tuple[type[Exception], ...]
] = True,
messages_key: str = "messages",
) -> None

Parameters:
- tools: Sequence of tools that can be invoked (BaseTool instances or callables)
- name: Node identifier for graph visualization and debugging
- tags: Optional metadata tags for filtering and organization
- handle_tool_errors: Error handling configuration (see Error Handling section)
- messages_key: State key containing the message list

Key Methods:
def inject_tool_args(
self,
tool_call: ToolCall,
input: Union[list[AnyMessage], dict[str, Any], BaseModel],
store: Optional[BaseStore],
) -> ToolCall

Injects graph state and store into tool call arguments for context-aware tools.
Usage Examples:
from langchain_core.tools import tool
from langgraph.prebuilt import ToolNode
from langchain_core.messages import AIMessage
# Basic tool execution
@tool
def search_web(query: str) -> str:
    """Search the web for information."""
    # Placeholder implementation: echo the query back as a canned result.
    return "Search results for: " + query
@tool
def calculate(expression: str) -> str:
    """Calculate a mathematical expression."""
    # Report any evaluation failure as a message instead of raising.
    try:
        result = eval(expression)  # In production, use a safe evaluator
    except Exception as e:
        return f"Error: {e}"
    return f"Result: {result}"
# Create tool node
tool_node = ToolNode([search_web, calculate])
# Execute with messages
tool_calls = [
{"name": "search_web", "args": {"query": "Python tutorials"}, "id": "1", "type": "tool_call"},
{"name": "calculate", "args": {"expression": "2 + 2"}, "id": "2", "type": "tool_call"}
]
ai_message = AIMessage(content="", tool_calls=tool_calls)
result = tool_node.invoke({"messages": [ai_message]})
# Execute with direct tool calls
result = tool_node.invoke(tool_calls)

Conditional routing function for tool-calling workflows that determines whether to continue to tool execution or end the workflow.
def tools_condition(
state: Union[list[AnyMessage], dict[str, Any], BaseModel],
messages_key: str = "messages",
) -> Literal["tools", "__end__"]

Parameters:
- state: Current graph state to examine for tool calls
- messages_key: Key or attribute name containing the message list

Returns: "tools" if tool calls are present, "__end__" to terminate
Usage Examples:
from langgraph.graph import StateGraph
from langgraph.prebuilt import ToolNode, tools_condition
# Basic conditional routing
graph = StateGraph(AgentState)
graph.add_node("llm", call_model_function)
graph.add_node("tools", ToolNode([search_web]))
graph.add_conditional_edges(
"llm",
tools_condition,
{"tools": "tools", "__end__": "__end__"}
)
# Custom messages key
def custom_condition(state):
    return tools_condition(state, messages_key="chat_history")

ToolNode supports multiple error handling strategies through the handle_tool_errors parameter:
# Catch all errors with default template
tool_node = ToolNode([my_tool], handle_tool_errors=True)
# Disable error handling (let exceptions propagate)
tool_node = ToolNode([my_tool], handle_tool_errors=False)

# Use custom error message for all errors
tool_node = ToolNode([my_tool], handle_tool_errors="Tool execution failed. Please try again.")

# Only handle specific exception types
tool_node = ToolNode([my_tool], handle_tool_errors=(ValueError, TypeError))

# Custom error handler function
def handle_api_errors(e: requests.RequestException) -> str:
    """Translate HTTP failure responses into user-facing guidance."""
    status = e.response.status_code
    if status == 429:
        return "API rate limit exceeded. Please wait and try again."
    if status >= 500:
        return "API service temporarily unavailable."
    return f"API error: {e.response.status_code}"
tool_node = ToolNode([api_tool], handle_tool_errors=handle_api_errors)
# Handler for multiple exception types
def handle_multiple_errors(e: Union[ValueError, ConnectionError]) -> str:
    """Map known exception types to friendly messages; fall back to str(e)."""
    # Checked in order, mirroring an isinstance chain.
    friendly = (
        (ValueError, "Invalid input provided to tool."),
        (ConnectionError, "Network connection failed. Please check connectivity."),
    )
    for exc_type, message in friendly:
        if isinstance(e, exc_type):
            return message
    return str(e)
tool_node = ToolNode([network_tool], handle_tool_errors=handle_multiple_errors)

ToolNode accepts multiple input formats for flexibility:
from langchain_core.messages import AIMessage, ToolMessage
# Direct message list
messages = [
AIMessage(content="I'll search for that", tool_calls=[
{"name": "search_web", "args": {"query": "Python"}, "id": "1", "type": "tool_call"}
])
]
result = tool_node.invoke(messages)

# State dictionary with messages key
state = {
"messages": [
AIMessage(content="", tool_calls=[
{"name": "search_web", "args": {"query": "Python"}, "id": "1", "type": "tool_call"}
])
],
"user_id": "12345",
"session_id": "abc"
}
result = tool_node.invoke(state)

# Pydantic model state
from pydantic import BaseModel
class AgentState(BaseModel):
messages: list[AIMessage]
context: dict
state = AgentState(
messages=[AIMessage(content="", tool_calls=[...])],
context={"user_id": "12345"}
)
result = tool_node.invoke(state)

# Direct tool call list
tool_calls = [
{"name": "search_web", "args": {"query": "Python"}, "id": "1", "type": "tool_call"},
{"name": "calculate", "args": {"expression": "2+2"}, "id": "2", "type": "tool_call"}
]
result = tool_node.invoke(tool_calls)

ToolNode automatically handles state and store injection for tools that require context:
from typing_extensions import Annotated
from langgraph.prebuilt import InjectedState, InjectedStore
@tool
def context_aware_tool(
    query: str,
    state: Annotated[dict, InjectedState],  # full graph state, injected by ToolNode rather than supplied by the LLM
    store: Annotated[Any, InjectedStore()]  # persistent store instance, also injected by ToolNode
) -> str:
    """Tool that uses graph state and persistent storage."""
    # Falls back to "unknown" when the state dict has no "user_id" entry.
    user_id = state.get("user_id", "unknown")
    # Access persistent data
    # NOTE(review): assumes store.get takes a (namespace, key) tuple plus a field name — confirm against the BaseStore API
    user_prefs = store.get(("preferences", user_id), "theme")
    return f"Query: {query}, User: {user_id}, Theme: {user_prefs}"
tool_node = ToolNode([context_aware_tool])

Tools can return Command objects for advanced state management:
from langgraph.types import Command
@tool
def state_modifying_tool(action: str) -> Command:
"""Tool that modifies graph state."""
if action == "reset":
return Command(
update={
"messages": [RemoveMessage(id=REMOVE_ALL_MESSAGES)],
"step_count": 0
}
)
elif action == "interrupt":
return Command(
goto=Send("human_approval", {"action": action})
)
    return Command(update={"last_action": action})

ToolNode automatically executes multiple tool calls in parallel:
# Multiple tool calls are executed concurrently
tool_calls = [
{"name": "search_web", "args": {"query": "Python"}, "id": "1", "type": "tool_call"},
{"name": "search_web", "args": {"query": "JavaScript"}, "id": "2", "type": "tool_call"},
{"name": "calculate", "args": {"expression": "10 * 5"}, "id": "3", "type": "tool_call"}
]
# All three tools run in parallel, results returned as list of ToolMessages
results = tool_node.invoke(tool_calls)

ToolNode automatically formats tool outputs for message consumption:
def msg_content_output(output: Any) -> Union[str, list[dict]]

Parameter:
- output: Raw output from tool execution

Returns: Formatted content (string or structured content blocks)
This function handles:
INVALID_TOOL_NAME_ERROR_TEMPLATE = (
"Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
)
TOOL_CALL_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."

These templates are used for formatting error messages when invalid tools are called or tool execution fails.
Install with Tessl CLI
npx tessl i tessl/pypi-langgraph-prebuilt