Library with high-level APIs for creating and executing LangGraph agents and tools.
ReAct-style agent creation functionality for building tool-calling agents that follow the Reasoning and Acting pattern with LangGraph workflows.
Creates an agent graph that calls tools in a loop until a stopping condition is met. Supports both static and dynamic model selection, customizable prompts, structured response generation, and advanced control flow hooks.
def create_react_agent(
model: Union[
str,
LanguageModelLike,
Callable[[StateSchema, Runtime[ContextT]], BaseChatModel],
Callable[[StateSchema, Runtime[ContextT]], Awaitable[BaseChatModel]],
Callable[
[StateSchema, Runtime[ContextT]], Runnable[LanguageModelInput, BaseMessage]
],
Callable[
[StateSchema, Runtime[ContextT]],
Awaitable[Runnable[LanguageModelInput, BaseMessage]],
],
],
tools: Union[Sequence[Union[BaseTool, Callable, dict[str, Any]]], ToolNode],
*,
prompt: Optional[Prompt] = None,
response_format: Optional[
Union[StructuredResponseSchema, tuple[str, StructuredResponseSchema]]
] = None,
pre_model_hook: Optional[RunnableLike] = None,
post_model_hook: Optional[RunnableLike] = None,
state_schema: Optional[StateSchemaType] = None,
context_schema: Optional[Type[Any]] = None,
checkpointer: Optional[Checkpointer] = None,
store: Optional[BaseStore] = None,
interrupt_before: Optional[list[str]] = None,
interrupt_after: Optional[list[str]] = None,
debug: bool = False,
version: Literal["v1", "v2"] = "v2",
name: Optional[str] = None,
**deprecated_kwargs: Any,
) -> CompiledStateGraph:

Parameters:
- model: The language model for the agent. Can be a string identifier, model instance, or callable for dynamic model selection
- tools: List of tools or ToolNode instance available to the agent
- prompt: Optional prompt (string, SystemMessage, callable, or Runnable)
- response_format: Schema for structured final output
- pre_model_hook: Node to execute before the agent (for message management)
- post_model_hook: Node to execute after the agent (for validation/approval)
- state_schema: Custom state schema (defaults to AgentState)
- context_schema: Schema for runtime context
- checkpointer: Checkpoint saver for state persistence
- store: Store object for cross-session persistence
- interrupt_before/interrupt_after: Node names to interrupt at
- debug: Enable debug mode
- version: Graph version ("v1" or "v2")
- name: Name for the compiled graph

Returns: CompiledStateGraph ready for execution
Usage Examples:
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
# Basic agent with string model identifier
@tool
def search_web(query: str) -> str:
    """Search the web for information."""
    # Stub body for the documentation example; a real tool would query a search API.
    return "Search results for: " + query
agent = create_react_agent(
"anthropic:claude-3-7-sonnet-latest",
[search_web],
prompt="You are a helpful research assistant."
)
# Agent with model instance and structured output
from pydantic import BaseModel
class SearchResult(BaseModel):
    # Structured-output schema passed as response_format in the example below.
    query: str  # the search query that was executed
    summary: str  # summarized answer for the query
    confidence: float  # NOTE(review): presumably a 0-1 score — confirm
agent_with_structured_output = create_react_agent(
ChatAnthropic(model="claude-3-7-sonnet-latest"),
[search_web],
response_format=SearchResult,
prompt="Provide structured search results."
)
# Dynamic model selection
def select_model(state: AgentState, runtime: Runtime) -> ChatAnthropic:
    """Choose the chat model at runtime from the invocation context."""
    # Fall back to the default model when the context does not name one.
    chosen = runtime.context.get("model_name", "claude-3-7-sonnet-latest")
    return ChatAnthropic(model=chosen).bind_tools([search_web])
dynamic_agent = create_react_agent(
select_model,
[search_web],
context_schema=dict # Context schema for runtime
)

State management classes for different agent configurations.
class AgentState(TypedDict):
"""The state of the agent."""
messages: Annotated[Sequence[BaseMessage], add_messages]
    remaining_steps: NotRequired[RemainingSteps]

class AgentStatePydantic(BaseModel):
"""The state of the agent."""
messages: Annotated[Sequence[BaseMessage], add_messages]
    remaining_steps: RemainingSteps = 25

class AgentStateWithStructuredResponse(AgentState):
"""The state of the agent with a structured response."""
structured_response: StructuredResponse
class AgentStateWithStructuredResponsePydantic(AgentStatePydantic):
"""The state of the agent with a structured response."""
    structured_response: StructuredResponse

Pre-model hooks run before each LLM call and are useful for message history management:
def trim_messages(state):
    """Keep only the last 10 messages to manage context length."""
    history = state["messages"]
    # Nothing to do while the history is still within the limit.
    if len(history) <= 10:
        return {}
    # Clear the stored history, then re-add only the 10 most recent entries.
    return {"messages": [RemoveMessage(id=REMOVE_ALL_MESSAGES)] + history[-10:]}
agent = create_react_agent(
model,
tools,
pre_model_hook=trim_messages
)

Post-model hooks run after each LLM call for validation or human approval:
def approval_hook(state):
    """Pause for human approval on sensitive operations."""
    newest = state["messages"][-1]
    # Messages without tool calls never need approval.
    calls = getattr(newest, "tool_calls", None)
    if not calls:
        return {}
    sensitive = {"delete_file", "send_email"}
    for call in calls:
        if call["name"] in sensitive:
            # Hand off to the human-approval workflow.
            return interrupt_for_approval(call)
    return {}
agent = create_react_agent(
model,
tools,
post_model_hook=approval_hook
)

from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.store.memory import InMemoryStore
# Agent with persistence and interrupts
checkpointer = SqliteSaver.from_conn_string(":memory:")
store = InMemoryStore()
agent = create_react_agent(
model,
tools,
checkpointer=checkpointer,
store=store,
interrupt_before=["tools"], # Pause before tool execution
interrupt_after=["agent"] # Pause after LLM response
)
# Execute with thread management
config = {"configurable": {"thread_id": "conversation-123"}}
result = agent.invoke({"messages": [("user", "Hello")]}, config)

The agent automatically handles various error conditions:
handle_tool_errors configuration:

# Configure tool error handling
from langgraph.prebuilt import ToolNode
def handle_api_errors(e: Exception) -> str:
    """Translate a tool exception into a user-facing error string."""
    text = str(e)
    # Rate-limit failures get a friendlier, retry-oriented message.
    if "rate_limit" in text.lower():
        return "API rate limit exceeded. Please try again later."
    return f"Tool error: {text}"
tool_node = ToolNode([search_web], handle_tool_errors=handle_api_errors)
agent = create_react_agent(model, tool_node)

Install with Tessl CLI
npx tessl i tessl/pypi-langgraph-prebuilt