Library with high-level APIs for creating and executing LangGraph agents and tools.

```
npx @tessl/cli install tessl/pypi-langgraph-prebuilt@0.6.0
```

LangGraph Prebuilt provides high-level APIs for creating and executing LangGraph agents and tools. It offers pre-built components that abstract complex agent orchestration patterns while maintaining flexibility for customization, with a focus on simplicity and ease of use for building AI-powered applications.

```
pip install langgraph  # bundled with LangGraph
```

```python
from langgraph.prebuilt import (
    create_react_agent,
    ToolNode,
    tools_condition,
    ValidationNode,
    InjectedState,
    InjectedStore,
)

# Human-in-the-Loop schemas (imported from submodule)
from langgraph.prebuilt.interrupt import (
    HumanInterrupt,
    HumanResponse,
    ActionRequest,
    HumanInterruptConfig,
)
```

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent

# Define a tool for the agent to use
@tool
def search(query: str) -> str:
    """Search for information on the web."""
    # This is a placeholder implementation
    if "sf" in query.lower():
        return "It's 60 degrees and foggy in San Francisco."
    return "It's 90 degrees and sunny."

# Create the agent
model = ChatAnthropic(model="claude-3-7-sonnet-latest")
agent = create_react_agent(model, [search])

# Run the agent
result = agent.invoke({
    "messages": [{"role": "user", "content": "what is the weather in sf"}]
})
```

LangGraph Prebuilt is built around several key components:

- create_react_agent for ReAct-style agent creation
- ToolNode for parallel tool execution with comprehensive error handling
- ValidationNode for structured output generation
- InjectedState and InjectedStore for providing tools with graph context

ReAct-style agent creation with dynamic model selection, tool calling, and customizable workflows. Supports both simple and complex agent architectures with pre/post-model hooks.

```python
def create_react_agent(
    model: Union[
        str,
        LanguageModelLike,
        Callable[[StateSchema, Runtime[ContextT]], BaseChatModel],
        Callable[[StateSchema, Runtime[ContextT]], Awaitable[BaseChatModel]],
        Callable[
            [StateSchema, Runtime[ContextT]], Runnable[LanguageModelInput, BaseMessage]
        ],
        Callable[
            [StateSchema, Runtime[ContextT]],
            Awaitable[Runnable[LanguageModelInput, BaseMessage]],
        ],
    ],
    tools: Union[Sequence[Union[BaseTool, Callable, dict[str, Any]]], ToolNode],
    *,
    prompt: Optional[Prompt] = None,
    response_format: Optional[
        Union[StructuredResponseSchema, tuple[str, StructuredResponseSchema]]
    ] = None,
    pre_model_hook: Optional[RunnableLike] = None,
    post_model_hook: Optional[RunnableLike] = None,
    state_schema: Optional[StateSchemaType] = None,
    context_schema: Optional[Type[Any]] = None,
    checkpointer: Optional[Checkpointer] = None,
    store: Optional[BaseStore] = None,
    interrupt_before: Optional[list[str]] = None,
    interrupt_after: Optional[list[str]] = None,
    debug: bool = False,
    version: Literal["v1", "v2"] = "v2",
    name: Optional[str] = None,
    **deprecated_kwargs: Any,
) -> CompiledStateGraph
```
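
The keyword-only parameters are where most customization happens. Below is a minimal sketch (the get_weather tool, prompt text, and thread id are illustrative, and it assumes langchain-anthropic and API credentials are available) combining a string model identifier, a plain-function tool, a system prompt, and an in-memory checkpointer so conversation state persists per thread:

```python
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt import create_react_agent

def get_weather(city: str) -> str:
    """Return the weather for a city (placeholder data)."""
    return f"It's always sunny in {city}."

agent = create_react_agent(
    "anthropic:claude-3-7-sonnet-latest",  # string model ids are resolved to a chat model
    [get_weather],                          # plain callables are wrapped as tools
    prompt="You are a concise weather assistant.",
    checkpointer=InMemorySaver(),           # enables per-thread conversation memory
)

# With a checkpointer attached, a thread_id carries state across invocations.
config = {"configurable": {"thread_id": "demo-thread"}}
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What's the weather in Paris?"}]},
    config,
)
```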

Tool execution node that processes tool calls from AI messages with parallel execution, comprehensive error handling, and state injection capabilities.

```python
class ToolNode(RunnableCallable):
    def __init__(
        self,
        tools: Sequence[Union[BaseTool, Callable]],
        *,
        name: str = "tools",
        tags: Optional[list[str]] = None,
        handle_tool_errors: Union[
            bool, str, Callable[..., str], tuple[type[Exception], ...]
        ] = True,
        messages_key: str = "messages",
    ) -> None: ...


def tools_condition(
    state: Union[list[AnyMessage], dict[str, Any], BaseModel],
    messages_key: str = "messages",
) -> Literal["tools", "__end__"]
```
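
For graphs built by hand rather than through create_react_agent, ToolNode and tools_condition plug directly into a StateGraph. A hedged sketch (the add tool and node names are illustrative; assumes langchain-anthropic is installed):

```python
from langchain_anthropic import ChatAnthropic
from langchain_core.tools import tool
from langgraph.graph import START, MessagesState, StateGraph
from langgraph.prebuilt import ToolNode, tools_condition

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

model = ChatAnthropic(model="claude-3-7-sonnet-latest").bind_tools([add])

def call_model(state: MessagesState):
    return {"messages": [model.invoke(state["messages"])]}

builder = StateGraph(MessagesState)
builder.add_node("agent", call_model)
builder.add_node("tools", ToolNode([add]))  # executes any tool calls in parallel
builder.add_edge(START, "agent")
# tools_condition routes to "tools" when the last AI message contains tool calls,
# otherwise to __end__.
builder.add_conditional_edges("agent", tools_condition)
builder.add_edge("tools", "agent")
graph = builder.compile()
```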

Annotations for injecting graph state and persistent storage into tool arguments, enabling context-aware tools without exposing internal state to the model.

```python
class InjectedState(InjectedToolArg):
    def __init__(self, field: Optional[str] = None) -> None: ...


class InjectedStore(InjectedToolArg): ...
```
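
A tool argument annotated with InjectedState is filled from the graph state when the tool executes and is hidden from the model's tool schema. A small sketch (the summarize_conversation tool is illustrative; injection happens when the tool runs inside ToolNode or a create_react_agent graph):

```python
from typing import Annotated, Any

from langchain_core.tools import tool
from langgraph.prebuilt import InjectedState, create_react_agent

@tool
def summarize_conversation(
    focus: str,
    # Injected from graph state at call time; never exposed to the model.
    state: Annotated[dict[str, Any], InjectedState],
) -> str:
    """Summarize the conversation so far around a given focus."""
    return f"{len(state['messages'])} messages so far; focus: {focus}"

agent = create_react_agent("anthropic:claude-3-7-sonnet-latest", [summarize_conversation])
```

Passing a field name, e.g. InjectedState("messages"), injects only that key of the state instead of the whole state object.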

Schema-based validation node for validating tool calls against Pydantic schemas, useful for structured output generation and data extraction workflows.

```python
class ValidationNode(RunnableCallable):
    def __init__(
        self,
        schemas: Sequence[Union[BaseTool, Type[BaseModel], Callable]],
        *,
        format_error: Optional[
            Callable[[BaseException, ToolCall, Type[BaseModel]], str]
        ] = None,
        name: str = "validation",
        tags: Optional[list[str]] = None,
    ) -> None: ...
```
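
A hedged sketch of validation (the Person schema and tool-call payload are illustrative): ValidationNode checks tool calls on the most recent AI message against the provided schemas and replies with ToolMessages carrying either the validated data or an error message the model can use to retry.

```python
from langchain_core.messages import AIMessage
from pydantic import BaseModel, field_validator

from langgraph.prebuilt import ValidationNode

class Person(BaseModel):
    """A person record the model should extract."""
    name: str
    age: int

    @field_validator("age")
    @classmethod
    def check_age(cls, v: int) -> int:
        if v < 0:
            raise ValueError("age must be non-negative")
        return v

validator = ValidationNode([Person])

# The tool call below fails validation, so the returned messages include an
# error ToolMessage that can be fed back to the model for a retry.
result = validator.invoke(
    {
        "messages": [
            AIMessage(
                content="",
                tool_calls=[
                    {"name": "Person", "args": {"name": "Ada", "age": -1}, "id": "call_1"}
                ],
            )
        ]
    }
)
```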

TypedDict schemas for Agent Inbox integration, enabling human intervention and approval workflows within agent execution.

```python
class HumanInterrupt(TypedDict):
    action_request: ActionRequest
    config: HumanInterruptConfig
    description: Optional[str]


class HumanResponse(TypedDict):
    type: Literal["accept", "ignore", "response", "edit"]
    args: Union[None, str, ActionRequest]
```
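
These schemas are typically raised from inside a tool via langgraph.types.interrupt. A hedged sketch following the Agent Inbox convention of passing a list of interrupts and receiving a list of responses (the send_email tool is illustrative; it assumes the graph runs with a checkpointer and is resumed with a Command carrying the human's response):

```python
from langchain_core.tools import tool
from langgraph.types import interrupt

from langgraph.prebuilt.interrupt import (
    ActionRequest,
    HumanInterrupt,
    HumanInterruptConfig,
    HumanResponse,
)

@tool
def send_email(to: str, body: str) -> str:
    """Send an email after a human approves it."""
    request: HumanInterrupt = {
        "action_request": ActionRequest(action="send_email", args={"to": to, "body": body}),
        "config": HumanInterruptConfig(
            allow_accept=True, allow_edit=True, allow_respond=True, allow_ignore=False
        ),
        "description": "Please review this email before it is sent.",
    }
    # interrupt() pauses the graph; execution resumes with the human's response.
    response: HumanResponse = interrupt([request])[0]
    if response["type"] == "accept":
        return f"Email sent to {to}."
    return "Email was not sent."
```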

Default agent state schemas used by create_react_agent, provided as both TypedDict and Pydantic models, with extended variants that carry the structured response.

```python
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]
    remaining_steps: NotRequired[RemainingSteps]


class AgentStatePydantic(BaseModel):
    messages: Annotated[Sequence[BaseMessage], add_messages]
    remaining_steps: RemainingSteps = 25


class AgentStateWithStructuredResponse(AgentState):
    structured_response: StructuredResponse


class AgentStateWithStructuredResponsePydantic(AgentStatePydantic):
    structured_response: StructuredResponse
```
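
The structured-response variants come into play when response_format is passed to create_react_agent: the final state then includes a structured_response key. A minimal sketch (the WeatherReport schema is illustrative; assumes a configured Anthropic model):

```python
from pydantic import BaseModel

from langgraph.prebuilt import create_react_agent

class WeatherReport(BaseModel):
    city: str
    summary: str

agent = create_react_agent(
    "anthropic:claude-3-7-sonnet-latest",
    tools=[],
    response_format=WeatherReport,  # adds a structured_response key to the final state
)

result = agent.invoke(
    {"messages": [{"role": "user", "content": "Describe typical weather in Oslo."}]}
)
report = result["structured_response"]  # a WeatherReport instance
```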

```python
# Core type aliases
StructuredResponse = Union[dict, BaseModel]
StructuredResponseSchema = Union[dict, type[BaseModel]]

# State management types
StateSchema = TypeVar("StateSchema", bound=Union[AgentState, AgentStatePydantic])
StateSchemaType = Type[StateSchema]

# Prompt types
Prompt = Union[
    SystemMessage,
    str,
    Callable[[StateSchema], LanguageModelInput],
    Runnable[StateSchema, LanguageModelInput],
]

# Message and tool types (from langchain-core)
ToolCall = dict[str, Any]  # Tool call dictionary with name, args, id, type fields
BaseMessage = Any  # From langchain_core.messages
AIMessage = Any  # From langchain_core.messages
ToolMessage = Any  # From langchain_core.messages
SystemMessage = Any  # From langchain_core.messages
AnyMessage = Union[BaseMessage, AIMessage, ToolMessage, SystemMessage]  # From langchain_core.messages
LanguageModelInput = Union[str, Sequence[BaseMessage]]  # From langchain-core

# LangChain types
BaseChatModel = Any  # From langchain_core.language_models
BaseTool = Any  # From langchain_core.tools
Runnable = Any  # From langchain_core.runnables
RunnableLike = Union[Runnable, Callable]
LanguageModelLike = Union[BaseChatModel, Runnable[LanguageModelInput, BaseMessage]]

# LangGraph types
Checkpointer = Any  # From langgraph-checkpoint
BaseStore = Any  # From langgraph.store.base
Runtime = Any  # From langgraph.runtime
CompiledStateGraph = Any  # From langgraph.graph.state
RemainingSteps = int  # From langgraph.managed
Command = Any  # From langgraph.types
Send = Any  # From langgraph.types

# Type variables
ContextT = TypeVar("ContextT")  # Runtime context type variable
```
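
Because Prompt also accepts a callable from state to model input, a dynamic system message can be assembled per model call. A small sketch (the build_prompt helper is illustrative):

```python
from langchain_core.messages import SystemMessage

from langgraph.prebuilt import create_react_agent

def build_prompt(state: dict):
    # Called before each model invocation; returns the full model input.
    return [SystemMessage(content="Answer briefly.")] + list(state["messages"])

agent = create_react_agent(
    "anthropic:claude-3-7-sonnet-latest",
    tools=[],
    prompt=build_prompt,
)
```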