Building applications with LLMs through composability
—
A comprehensive toolkit for defining and managing executable tools that LLM agents can invoke. The tools module provides decorators, base classes, dependency injection mechanisms, and runtime context for building function-based and class-based tools with full support for state management, error handling, and advanced injection patterns.
pip install langchain

from langchain.tools import (
BaseTool, tool, ToolException,
InjectedToolArg, InjectedToolCallId,
InjectedState, InjectedStore, ToolRuntime
)

from langchain.tools import tool
from langchain.agents import create_agent
# Simple function-based tool
@tool
def calculator(expression: str) -> float:
    """Evaluate a mathematical expression.

    Args:
        expression: Mathematical expression to evaluate

    Returns:
        Result of the calculation

    Raises:
        ToolException: If the expression is invalid or cannot be evaluated.
    """
    try:
        # Disable builtins so LLM-supplied input cannot execute arbitrary code;
        # same safe-eval pattern as CalculatorTool._run elsewhere in this guide.
        result = eval(expression, {"__builtins__": {}}, {})
        # Coerce to float so the result matches the declared return type.
        return float(result)
    except Exception as e:
        raise ToolException(f"Invalid expression: {e}")
# Use tool in an agent
agent = create_agent(
model="openai:gpt-4o",
tools=[calculator],
system_prompt="Use the calculator for math operations."
)
result = agent.invoke({
"messages": [{"role": "user", "content": "What is 42 * 137?"}]
})

The tools system is built on a flexible architecture that separates tool definition from execution:
- Tools can be defined as functions (via the `@tool` decorator) or as classes (inheriting from `BaseTool`)
- `ToolRuntime` provides execution context with access to configuration, store, and other runtime metadata
- `ToolException` provides structured error handling with messages that can be returned to the LLM

This design enables building tools that range from simple stateless functions to complex stateful operations with full access to agent state and persistent storage.
All tools must have:
The @tool decorator transforms regular Python functions into LangChain tools with automatic schema generation.
@tool
def function_name(arg1: type1, arg2: type2) -> return_type:
"""Tool description.
Args:
arg1: Description of arg1
arg2: Description of arg2
Returns:
Description of return value
"""
...Features:
Simple Example:
from langchain.tools import tool
@tool
def get_word_length(word: str) -> int:
"""Calculate the length of a word.
Args:
word: The word to measure
Returns:
The number of characters in the word
"""
return len(word)Multiple Parameters:
@tool
def search_database(
query: str,
limit: int = 10,
include_archived: bool = False
) -> list[dict]:
"""Search the database for matching records.
Args:
query: Search query string
limit: Maximum number of results to return (default: 10)
include_archived: Whether to include archived records (default: False)
Returns:
List of matching database records
"""
results = perform_search(query, limit, include_archived)
return resultsAsync Tools:
import asyncio
from langchain.tools import tool
@tool
async def fetch_weather(city: str) -> dict:
"""Fetch current weather data for a city.
Args:
city: Name of the city
Returns:
Weather data including temperature, conditions, humidity
"""
async with httpx.AsyncClient() as client:
response = await client.get(f"https://api.weather.com/v1/{city}")
return response.json()Custom Tool Name:
@tool("my_custom_tool_name")
def my_function(x: int) -> int:
"""This tool has a custom name instead of using the function name."""
return x * 2Tool with Complex Return Types:
from typing import TypedDict
from langchain.tools import tool
class SearchResult(TypedDict):
    """A single ranked hit returned by the `web_search` tool."""

    title: str  # Page title of the result
    url: str  # URL of the result page
    snippet: str  # Short text excerpt from the page
    relevance_score: float  # Ranking score (higher = more relevant)
@tool
def web_search(query: str, num_results: int = 5) -> list[SearchResult]:
"""Search the web and return ranked results.
Args:
query: Search query
num_results: Number of results to return
Returns:
List of search results with titles, URLs, and relevance scores
"""
# Implementation
return search_engine.search(query, num_results)For tools requiring initialization state, validation logic, or fine-grained control, inherit from BaseTool.
class BaseTool:
"""Abstract base class for tools.
Subclasses must implement:
- _run() or _arun() methods for execution
- name: str attribute
- description: str attribute
"""
def invoke(self, input: dict | str) -> Any:
"""Execute tool synchronously."""
...
async def ainvoke(self, input: dict | str) -> Any:
"""Execute tool asynchronously."""
...Key Methods:
- `invoke(input)`: Synchronous tool execution
- `ainvoke(input)`: Asynchronous tool execution
- `_run(*args, **kwargs)`: Implementation method for sync execution
- `_arun(*args, **kwargs)`: Implementation method for async execution

Required Attributes:

- `name` (str): Tool identifier
- `description` (str): Tool description for the LLM

Optional Attributes:

- `args_schema` (Type[BaseModel]): Pydantic model for input validation
- `return_direct` (bool): Whether to return the result directly to the user
- `verbose` (bool): Enable verbose logging

Basic Example:
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class CalculatorInput(BaseModel):
"""Input schema for calculator tool."""
expression: str = Field(description="Mathematical expression to evaluate")
class CalculatorTool(BaseTool):
name: str = "calculator"
description: str = "Evaluate mathematical expressions safely"
args_schema: type[BaseModel] = CalculatorInput
def _run(self, expression: str) -> float:
"""Synchronous execution."""
try:
result = eval(expression, {"__builtins__": {}}, {})
return float(result)
except Exception as e:
raise ToolException(f"Invalid expression: {e}")
async def _arun(self, expression: str) -> float:
"""Asynchronous execution."""
return self._run(expression)
# Instantiate and use
calculator = CalculatorTool()
result = calculator.invoke({"expression": "2 + 2"})Stateful Tool:
from langchain.tools import BaseTool
from pydantic import BaseModel
class DatabaseConfig(BaseModel):
"""Configuration for database tool."""
host: str
port: int
database: str
class DatabaseQueryTool(BaseTool):
name: str = "database_query"
description: str = "Query the company database"
# Tool-specific state
connection: Any = None
config: DatabaseConfig
def __init__(self, config: DatabaseConfig):
super().__init__()
self.config = config
self.connection = create_database_connection(
config.host, config.port, config.database
)
def _run(self, query: str) -> list[dict]:
"""Execute database query."""
cursor = self.connection.cursor()
cursor.execute(query)
return cursor.fetchall()
async def _arun(self, query: str) -> list[dict]:
"""Execute database query asynchronously."""
return await self.connection.execute_async(query)Tool with Return Direct:
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class WeatherInput(BaseModel):
"""Input schema for weather tool."""
city: str = Field(description="City name")
class WeatherTool(BaseTool):
name: str = "get_weather"
description: str = "Get current weather for a city"
args_schema: type[BaseModel] = WeatherInput
return_direct: bool = True # Return result directly to user without LLM processing
def _run(self, city: str) -> str:
"""Get weather data."""
return f"Weather in {city}: 72°F, Sunny"
async def _arun(self, city: str) -> str:
return self._run(city)
# When used in an agent, this tool's output goes directly to the user
# without being processed by the LLMTool with Multiple Arguments:
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class FileSearchInput(BaseModel):
directory: str = Field(description="Directory to search in")
pattern: str = Field(description="File pattern to match (e.g., '*.py')")
recursive: bool = Field(default=True, description="Search subdirectories")
class FileSearchTool(BaseTool):
name: str = "file_search"
description: str = "Search for files matching a pattern"
args_schema: type[BaseModel] = FileSearchInput
def _run(self, directory: str, pattern: str, recursive: bool = True) -> list[str]:
"""Search for files."""
import glob
search_pattern = f"{directory}/**/{pattern}" if recursive else f"{directory}/{pattern}"
matches = glob.glob(search_pattern, recursive=recursive)
return matches
async def _arun(self, directory: str, pattern: str, recursive: bool = True) -> list[str]:
return self._run(directory, pattern, recursive)ToolException provides structured error handling, allowing tools to communicate errors back to the LLM.
class ToolException(Exception):
"""Exception for tool execution errors.
When raised in a tool, the error message is passed back
to the LLM in a ToolMessage for error handling.
"""Basic Usage:
from langchain.tools import tool, ToolException
@tool
def divide_numbers(a: float, b: float) -> float:
"""Divide two numbers.
Args:
a: Numerator
b: Denominator
Returns:
Result of a / b
"""
if b == 0:
raise ToolException("Cannot divide by zero. Please provide a non-zero denominator.")
return a / bError Handling with Context:
from langchain.tools import tool, ToolException
import requests
@tool
def fetch_url(url: str) -> str:
"""Fetch content from a URL.
Args:
url: URL to fetch
Returns:
Page content
"""
try:
response = requests.get(url, timeout=10)
response.raise_for_status()
return response.text
except requests.Timeout:
raise ToolException(
f"Request to {url} timed out after 10 seconds. "
"The server may be slow or unreachable."
)
except requests.HTTPError as e:
raise ToolException(
f"HTTP error {e.response.status_code} when fetching {url}. "
f"The URL may be invalid or the server may be down."
)
except Exception as e:
raise ToolException(f"Unexpected error fetching {url}: {str(e)}")Validation Errors:
from langchain.tools import tool, ToolException
from datetime import datetime
@tool
def schedule_meeting(
date: str,
time: str,
duration_minutes: int,
attendees: list[str]
) -> str:
"""Schedule a meeting.
Args:
date: Meeting date in YYYY-MM-DD format
time: Meeting time in HH:MM format
duration_minutes: Duration in minutes (15-480)
attendees: List of attendee email addresses
"""
# Validate date format
try:
datetime.strptime(date, '%Y-%m-%d')
except ValueError:
raise ToolException(
f"Invalid date format: {date}. Please use YYYY-MM-DD format (e.g., 2024-12-31)."
)
# Validate time format
try:
datetime.strptime(time, '%H:%M')
except ValueError:
raise ToolException(
f"Invalid time format: {time}. Please use HH:MM format (e.g., 14:30)."
)
# Validate duration
if not 15 <= duration_minutes <= 480:
raise ToolException(
f"Invalid duration: {duration_minutes} minutes. "
"Duration must be between 15 and 480 minutes (8 hours)."
)
# Validate attendees
if len(attendees) == 0:
raise ToolException("At least one attendee is required.")
# Schedule meeting
meeting_id = create_meeting(date, time, duration_minutes, attendees)
return f"Meeting scheduled successfully. Meeting ID: {meeting_id}"Class-Based Tool with Exception Handling:
from langchain.tools import BaseTool, ToolException
class DatabaseQueryTool(BaseTool):
name: str = "query_database"
description: str = "Execute SQL queries on the database"
def _run(self, query: str) -> list[dict]:
"""Execute query with comprehensive error handling."""
if not query.strip().upper().startswith('SELECT'):
raise ToolException(
"Only SELECT queries are allowed for safety. "
"Queries must start with 'SELECT'."
)
try:
results = self.connection.execute(query)
if len(results) == 0:
raise ToolException(
"Query returned no results. Try modifying your query criteria."
)
return results
except DatabaseConnectionError:
raise ToolException(
"Database connection lost. Please try again in a moment."
)
except QuerySyntaxError as e:
raise ToolException(
f"SQL syntax error: {e}. Please check your query syntax."
)
async def _arun(self, query: str) -> list[dict]:
return self._run(query)Tools can access runtime context (state, store, call IDs, configuration) without exposing these parameters to the LLM. This is useful for:
For advanced dependency injection patterns, see:
from typing import Annotated
from langchain.tools import tool, InjectedToolArg
@tool
def get_user_info(
user_id: str,
config: Annotated[dict, InjectedToolArg]
) -> dict:
"""Get information about a user.
Args:
user_id: The user's ID
Returns:
User information dictionary
"""
# config is injected at runtime and not visible to the LLM
api_key = config.get("api_key")
database = config.get("database")
return database.get_user(user_id, api_key=api_key)Here's a practical example showing multiple tool types working together:
from typing import Annotated
from langchain.tools import tool, BaseTool, ToolException
from langchain.agents import create_agent
from pydantic import BaseModel, Field
# Simple function tool
@tool
def calculate(expression: str) -> float:
    """Evaluate a mathematical expression safely.

    Args:
        expression: Math expression to evaluate (e.g., "2 + 2")

    Returns:
        Result of the calculation
    """
    # Whitelist of callables the expression may use; builtins are disabled.
    allowed = {"abs": abs, "min": min, "max": max, "sum": sum, "pow": pow}
    try:
        return float(eval(expression, {"__builtins__": {}}, allowed))
    except Exception as exc:
        raise ToolException(f"Cannot evaluate expression: {exc}")
# Class-based tool
class WeatherInput(BaseModel):
    """Input schema for the weather tool (shown to the LLM)."""

    city: str = Field(description="City name")
    units: str = Field(default="celsius", description="Temperature units")
class WeatherTool(BaseTool):
    """Class-based tool reporting (simulated) current weather for a city."""

    name: str = "get_weather"
    description: str = "Get current weather for a city"
    args_schema: type[BaseModel] = WeatherInput
    # API credential; populated by __init__ — unused in this simulated example.
    api_key: str = ""

    def __init__(self, api_key: str):
        super().__init__()
        self.api_key = api_key

    def _run(self, city: str, units: str = "celsius") -> str:
        """Get weather data."""
        try:
            # Simulate API call
            temp = 20.0 if units == "celsius" else 68.0
            unit_symbol = "°C" if units == "celsius" else "°F"
            return f"Weather in {city}: {temp}{unit_symbol}, Partly Cloudy"
        except Exception as e:
            raise ToolException(f"Failed to fetch weather: {e}")

    async def _arun(self, city: str, units: str = "celsius") -> str:
        """Async entry point; delegates to the synchronous implementation."""
        return self._run(city, units)
# Create agent with both tools
weather_tool = WeatherTool(api_key="demo-key")
agent = create_agent(
model="openai:gpt-4o",
tools=[calculate, weather_tool],
system_prompt="""You are a helpful assistant with access to calculation and weather tools.
Use these tools to help the user effectively."""
)
# Use the agent
result = agent.invoke({
"messages": [{"role": "user", "content": "Calculate 15 * 8 and get weather in Paris"}]
})
print(result["messages"][-1].content)Here's a comprehensive example showing all tool capabilities working together:
from typing import Annotated, TypedDict
from langchain.tools import tool, BaseTool, ToolException
from langchain.tools import InjectedToolArg, InjectedToolCallId
from langchain.agents import create_agent, AgentState
from langgraph.prebuilt import InjectedState, InjectedStore, ToolRuntime
from langgraph.store.memory import InMemoryStore
from langgraph.store import BaseStore
from pydantic import BaseModel, Field
import time
# Custom state schema
class CustomAgentState(AgentState):
    """Agent state extended with per-user session metadata."""

    user_id: str  # Identifier of the current user; used to namespace stored notes
    session_start_time: float  # Unix timestamp (seconds) when the session began
# Simple function tool
@tool
def calculate(expression: str) -> float:
    """Evaluate a mathematical expression safely.

    Args:
        expression: Math expression to evaluate (e.g., "2 + 2")

    Returns:
        Result of the calculation
    """
    safe_globals = {"__builtins__": {}}
    helpers = {"abs": abs, "min": min, "max": max, "sum": sum, "pow": pow}
    try:
        # Safe evaluation: no builtins, only the helper functions above.
        return float(eval(expression, safe_globals, helpers))
    except Exception as err:
        raise ToolException(f"Cannot evaluate expression: {err}")
# Tool with state injection
@tool
def get_session_duration(
    state: Annotated[CustomAgentState, InjectedState]
) -> str:
    """Get the current session duration.

    Returns:
        Session duration in seconds
    """
    # If no start time was recorded, fall back to "now" (duration reads ~0s).
    started = state.get("session_start_time", time.time())
    elapsed = time.time() - started
    return f"Session duration: {elapsed:.2f} seconds"
# Tool with store injection
@tool
def save_note(
    note: str,
    state: Annotated[CustomAgentState, InjectedState],
    store: Annotated[BaseStore, InjectedStore],
    call_id: Annotated[str, InjectedToolCallId]
) -> str:
    """Save a note for this user.

    Args:
        note: Note content to save

    Returns:
        Confirmation message
    """
    # Notes are namespaced per user so each user only sees their own;
    # the tool-call ID doubles as a unique storage key for the note.
    owner = state.get("user_id", "unknown")
    record = {"note": note, "timestamp": time.time()}
    store.put(("user_notes", owner), call_id, record)
    return f"Note saved with ID: {call_id}"
@tool
def list_notes(
    state: Annotated[CustomAgentState, InjectedState],
    store: Annotated[BaseStore, InjectedStore]
) -> str:
    """List all notes for this user.

    Returns:
        List of saved notes
    """
    owner = state.get("user_id", "unknown")
    items = store.search(("user_notes", owner))
    if not items:
        return "No notes found"
    now = time.time()
    # One bullet per note, annotated with how long ago it was saved.
    lines = [
        f"- {item.value['note']} (saved {now - item.value['timestamp']:.0f}s ago)"
        for item in items
    ]
    return "Your notes:\n" + "\n".join(lines)
# Class-based tool with configuration
class WeatherInput(BaseModel):
    """Input schema for the weather tool (shown to the LLM)."""

    city: str = Field(description="City name")
    units: str = Field(default="celsius", description="Temperature units")
class WeatherTool(BaseTool):
    """Class-based weather tool carrying its own API-key configuration.

    Demonstrates injecting the tool-call ID into a BaseTool subclass:
    `call_id` is annotated with `InjectedToolCallId`, so it is supplied by
    the runtime and never exposed to the LLM.
    """

    name: str = "get_weather"
    description: str = "Get current weather for a city"
    args_schema: type[BaseModel] = WeatherInput
    # Tool configuration
    api_key: str = ""  # Credential set via __init__; unused in this simulation

    def __init__(self, api_key: str):
        super().__init__()
        self.api_key = api_key

    def _run(
        self,
        city: str,
        units: str = "celsius",
        # `str | None`: the default is None, so the annotation must admit it.
        call_id: Annotated[str | None, InjectedToolCallId] = None
    ) -> str:
        """Get weather data."""
        print(f"[{call_id}] Fetching weather for {city}")
        try:
            # Simulate API call
            temp = 20.0 if units == "celsius" else 68.0
            unit_symbol = "°C" if units == "celsius" else "°F"
            return f"Weather in {city}: {temp}{unit_symbol}, Partly Cloudy"
        except Exception as e:
            raise ToolException(f"Failed to fetch weather: {e}")

    async def _arun(
        self,
        city: str,
        units: str = "celsius",
        call_id: Annotated[str | None, InjectedToolCallId] = None
    ) -> str:
        """Async entry point; delegates to the synchronous implementation."""
        return self._run(city, units, call_id)
# Create agent with all tools
def create_comprehensive_agent():
    """Assemble the demo agent with every example tool attached.

    Returns:
        Tuple of (agent, store) — the store is returned so callers can
        inspect notes persisted by the tools.
    """
    # Initialize store
    store = InMemoryStore()
    # Initialize weather tool with config
    weather_tool = WeatherTool(api_key="demo-key")
    # Create agent
    agent = create_agent(
        model="openai:gpt-4o",
        tools=[
            calculate,
            get_session_duration,
            save_note,
            list_notes,
            weather_tool
        ],
        state_schema=CustomAgentState,
        store=store,
        system_prompt="""You are a helpful assistant with access to several tools.
You can:
- Perform calculations
- Check session duration
- Save and retrieve notes
- Get weather information
Use these tools to help the user effectively."""
    )
    return agent, store
# Example usage
if __name__ == "__main__":
agent, store = create_comprehensive_agent()
# Conversation with custom state
session_start = time.time()
# First interaction
result1 = agent.invoke({
"messages": [{"role": "user", "content": "Calculate 15 * 8 and save a note that I asked this"}],
"user_id": "user-123",
"session_start_time": session_start
})
print("Result 1:", result1["messages"][-1].content)
# Second interaction
time.sleep(2)
result2 = agent.invoke({
"messages": result1["messages"] + [
{"role": "user", "content": "Show me my notes and how long this session has been"}
],
"user_id": "user-123",
"session_start_time": session_start
})
print("Result 2:", result2["messages"][-1].content)
# Third interaction
result3 = agent.invoke({
"messages": result2["messages"] + [
{"role": "user", "content": "What's the weather in Paris?"}
],
"user_id": "user-123",
"session_start_time": session_start
})
print("Result 3:", result3["messages"][-1].content)This comprehensive example demonstrates:
- Function-based tools via the `@tool` decorator
- Structured error handling via `ToolException`

from langchain_core.tools import BaseTool, ToolException
from pydantic import BaseModel
from typing import Any, Callable
# Tool decorator
def tool(
func: Callable | None = None,
*,
name: str | None = None,
description: str | None = None,
return_direct: bool = False,
args_schema: type[BaseModel] | None = None,
infer_schema: bool = True
) -> BaseTool:
"""Decorator to convert functions to tools."""
...
# Base tool class
class BaseTool:
"""Abstract base class for tools."""
name: str
description: str
args_schema: type[BaseModel] | None
return_direct: bool
verbose: bool
def invoke(self, input: dict | str) -> Any: ...
async def ainvoke(self, input: dict | str) -> Any: ...
def _run(self, *args: Any, **kwargs: Any) -> Any: ...
async def _arun(self, *args: Any, **kwargs: Any) -> Any: ...
# Exception class
class ToolException(Exception):
"""Exception for tool execution errors."""
    ...

Install with Tessl CLI
npx tessl i tessl/pypi-langchain@1.2.1