Building applications with LLMs through composability
—
LangChain's dependency injection system allows tools to access runtime context, state, and storage without exposing these parameters to the LLM's tool schema. This powerful feature enables tools to leverage conversation history, persistent storage, and execution metadata while maintaining clean, focused tool signatures for the model.
The injection system provides five main injection types: InjectedToolArg, InjectedToolCallId, InjectedState, InjectedStore, and ToolRuntime.
All injected parameters are marked with Python's Annotated type hint and are automatically hidden from the tool schema that the LLM sees.
InjectedToolArg is a generic annotation for marking tool parameters that should be injected at runtime without exposing them to the LLM.
InjectedToolArg = Annotated[T, InjectionMarker]

Purpose: Mark parameters for dependency injection without exposing them to the LLM
Usage: Annotate function parameters with Annotated[Type, InjectedToolArg]
from typing import Annotated

from langchain.agents import create_agent
from langchain.tools import tool, InjectedToolArg


@tool
def get_user_info(
    user_id: str,
    config: Annotated[dict, InjectedToolArg],
) -> dict:
    """Get information about a user.

    Args:
        user_id: The user's ID
        config: Injected at runtime and hidden from the LLM's tool schema;
            expected to carry an ``api_key`` and a ``database`` handle.

    Returns:
        User information dictionary
    """
    # config is injected at runtime and not visible to the LLM
    api_key = config.get("api_key")
    database = config.get("database")
    # Use injected config to fetch user info
    return database.get_user(user_id, api_key=api_key)


# When creating the agent, pass config through runtime configuration
agent = create_agent(
    model="openai:gpt-4o",
    tools=[get_user_info],
)

# NOTE(review): `db_connection` is assumed to be defined earlier in the
# surrounding application code.
result = agent.invoke(
    {"messages": [{"role": "user", "content": "Get info for user-123"}]},
    config={
        "api_key": "secret-key",
        "database": db_connection,
    },
)

from typing import Annotated
from langchain.tools import tool, InjectedToolArg


@tool
def secure_api_call(
    endpoint: str,
    api_key: Annotated[str, InjectedToolArg],
    base_url: Annotated[str, InjectedToolArg],
    timeout: Annotated[int, InjectedToolArg] = 30,
) -> dict:
    """Make an authenticated API call.

    Args:
        endpoint: API endpoint to call (e.g., '/users')

    Returns:
        API response data
    """
    # Everything except `endpoint` is injected and never shown to the LLM.
    import requests

    response = requests.get(
        f"{base_url}{endpoint}",
        headers={"Authorization": f"Bearer {api_key}"},
        timeout=timeout,
    )
    return response.json()

from typing import Annotated
from langchain.tools import BaseTool, InjectedToolArg
from pydantic import BaseModel, Field
class FileReadInput(BaseModel):
filename: str = Field(description="Name of the file to read")
class FileReadTool(BaseTool):
name: str = "read_file"
description: str = "Read contents of a file"
args_schema: type[BaseModel] = FileReadInput
def _run(
self,
filename: str,
base_directory: Annotated[str, InjectedToolArg]
) -> str:
"""Read file with injected base directory."""
import os
# base_directory is injected and not exposed to LLM
full_path = os.path.join(base_directory, filename)
# Security check
if not full_path.startswith(base_directory):
raise ToolException("Access denied: path traversal detected")
with open(full_path, 'r') as f:
return f.read()
async def _arun(
self,
filename: str,
base_directory: Annotated[str, InjectedToolArg]
) -> str:
return self._run(filename, base_directory)InjectedToolCallId provides access to the unique identifier of the current tool invocation, useful for logging, tracking, and correlation.
InjectedToolCallId = Annotated[str, InjectionMarker]

Purpose: Access the unique identifier of the current tool invocation
Usage: Use in function signature to receive the tool call ID
from typing import Annotated
from langchain.tools import tool, InjectedToolCallId


@tool
def process_data(
    data: str,
    call_id: Annotated[str, InjectedToolCallId],
) -> str:
    """Process data and log the operation.

    Args:
        data: Data to process

    Returns:
        Processed data
    """
    # The tool-call ID is injected automatically and never shown to the LLM.
    print(f"[{call_id}] Processing data: {data}")
    processed = data.upper()
    print(f"[{call_id}] Processing complete")
    return processed

from typing import Annotated
from langchain.tools import tool, InjectedToolCallId, ToolException
import logging

import requests

logger = logging.getLogger(__name__)


@tool
def fetch_external_api(
    endpoint: str,
    call_id: Annotated[str, InjectedToolCallId],
) -> dict:
    """Fetch data from external API with tracking.

    Args:
        endpoint: API endpoint to call

    Returns:
        API response data

    Raises:
        ToolException: If the HTTP request fails for any reason.
    """
    logger.info(f"[{call_id}] Starting API call to {endpoint}")
    try:
        response = requests.get(f"https://api.example.com{endpoint}")
        logger.info(f"[{call_id}] API call successful, status: {response.status_code}")
        return response.json()
    except Exception as e:
        logger.error(f"[{call_id}] API call failed: {e}")
        raise ToolException(f"API call failed: {e}")

from typing import Annotated
from langchain.tools import tool, InjectedToolCallId
# Shared state for tracking related operations
operation_tracker = {}
@tool
def start_operation(
operation_name: str,
call_id: Annotated[str, InjectedToolCallId]
) -> str:
"""Start a tracked operation.
Args:
operation_name: Name of the operation to start
Returns:
Confirmation message
"""
operation_tracker[call_id] = {
"name": operation_name,
"start_time": time.time(),
"status": "in_progress"
}
return f"Operation '{operation_name}' started with ID {call_id}"
@tool
def complete_operation(
result: str,
call_id: Annotated[str, InjectedToolCallId]
) -> str:
"""Complete a tracked operation.
Args:
result: Operation result
Returns:
Summary message
"""
if call_id in operation_tracker:
operation = operation_tracker[call_id]
duration = time.time() - operation["start_time"]
operation["status"] = "completed"
operation["result"] = result
return f"Operation '{operation['name']}' completed in {duration:.2f}s"
else:
return "No operation found with this ID"InjectedState injects the current agent state into tools, providing access to conversation history and custom state fields.
InjectedState = Annotated[StateType, InjectionMarker]

Purpose: Access agent state including messages and custom fields
Usage: Type hint with the specific state schema type
from typing import Annotated
from langchain.tools import tool
from langchain.agents import AgentState
from langgraph.prebuilt import InjectedState


@tool
def summarize_conversation(
    state: Annotated[AgentState, InjectedState],
) -> str:
    """Summarize the conversation so far.

    Returns:
        A summary of the conversation
    """
    # The full message history comes from the injected agent state.
    messages = state["messages"]
    human_count = sum(1 for m in messages if m.type == "human")
    ai_count = sum(1 for m in messages if m.type == "ai")
    summary = (
        f"Conversation has {human_count} user messages "
        f"and {ai_count} AI responses. "
    )
    # Include recent context when there is any history at all.
    if messages:
        summary += f"Last message: {messages[-1].content[:100]}..."
    return summary

from typing import Annotated, TypedDict
from langchain.tools import tool
from langchain.agents import AgentState, create_agent
from langgraph.prebuilt import InjectedState


# Define custom state schema
class CustomState(AgentState):
    user_name: str
    user_preferences: dict


@tool
def personalized_greeting(
    state: Annotated[CustomState, InjectedState],
) -> str:
    """Generate a personalized greeting.

    Returns:
        Personalized greeting message
    """
    # Custom state fields are read with defaults in case they were not set.
    name = state.get("user_name", "there")
    theme = state.get("user_preferences", {}).get("theme", "default")
    return (
        f"Hello, {name}! "
        f"I see you prefer the {theme} theme. "
        "How can I help you today?"
    )


# Create agent with custom state
agent = create_agent(
    model="openai:gpt-4o",
    tools=[personalized_greeting],
    state_schema=CustomState,
)

# Invoke with custom state fields
result = agent.invoke({
    "messages": [{"role": "user", "content": "Greet me"}],
    "user_name": "Alice",
    "user_preferences": {"theme": "dark"},
})

from typing import Annotated
from langchain.tools import tool, ToolException
from langchain.agents import AgentState
from langgraph.prebuilt import InjectedState


@tool
def context_aware_search(
    query: str,
    state: Annotated[AgentState, InjectedState],
) -> list[str]:
    """Search with context from conversation history.

    Args:
        query: Search query

    Returns:
        Search results enhanced with conversation context
    """
    # Only the five most recent messages contribute context.
    recent_topics = extract_topics_from_messages(state["messages"][-5:])
    enhanced_query = query
    if recent_topics:
        enhanced_query = f"{query} {' '.join(recent_topics)}"
        print(f"Enhanced query with context: {enhanced_query}")
    # `perform_search` is assumed to be defined elsewhere in the application.
    return perform_search(enhanced_query)
def extract_topics_from_messages(messages) -> list[str]:
    """Extract up to three key topics from messages.

    A "topic" here is any word longer than five characters. Duplicates are
    dropped while preserving first-seen order, so the result is deterministic
    — the original ``list(set(topics))[:3]`` returned an arbitrary subset
    that varied between runs because of string-hash randomization.

    Args:
        messages: Message objects; anything without a ``content`` attribute
            is skipped.

    Returns:
        At most three topic words, in order of first appearance.
    """
    topics: list[str] = []
    seen: set[str] = set()
    for msg in messages:
        if hasattr(msg, 'content'):
            # Simple heuristic: long words stand in for topics.
            for word in msg.content.split():
                if len(word) > 5 and word not in seen:
                    seen.add(word)
                    topics.append(word)
    return topics[:3]

from typing import Annotated
from langchain.tools import tool, ToolException
from langchain.agents import AgentState
from langgraph.prebuilt import InjectedState
@tool
def restricted_operation(
operation: str,
state: Annotated[AgentState, InjectedState]
) -> str:
"""Perform an operation that requires certain state conditions.
Args:
operation: Operation to perform
Returns:
Operation result
"""
# Check if certain conditions are met in the conversation
messages = state["messages"]
# Check if user has been authenticated
authenticated = any(
"authenticated" in msg.content.lower()
for msg in messages
if msg.type == "ai"
)
if not authenticated:
raise ToolException(
"This operation requires authentication. "
"Please authenticate first by providing your credentials."
)
# Perform the restricted operation
result = execute_restricted_operation(operation)
return f"Operation '{operation}' completed successfully: {result}"InjectedStore injects the shared store into tools, enabling persistent data storage across agent executions and threads.
InjectedStore = Annotated[BaseStore, InjectionMarker]

Purpose: Access persistent storage for cross-thread data sharing
Usage: Use to read/write data that persists across agent executions
from typing import Annotated

from langchain.agents import create_agent
from langchain.tools import tool
from langgraph.prebuilt import InjectedStore
from langgraph.store import BaseStore


@tool
def save_user_preference(
    key: str,
    value: str,
    store: Annotated[BaseStore, InjectedStore],
) -> str:
    """Save a user preference.

    Args:
        key: Preference key
        value: Preference value

    Returns:
        Confirmation message
    """
    # Store data that persists across conversations
    namespace = ("user_preferences",)
    store.put(namespace, key, {"value": value})
    return f"Saved preference: {key} = {value}"


@tool
def get_user_preference(
    key: str,
    store: Annotated[BaseStore, InjectedStore],
) -> str:
    """Retrieve a user preference.

    Args:
        key: Preference key

    Returns:
        Preference value or not found message
    """
    namespace = ("user_preferences",)
    item = store.get(namespace, key)
    if item:
        return f"{key} = {item.value['value']}"
    else:
        return f"No preference found for key: {key}"


# Create agent with store
from langgraph.store.memory import InMemoryStore

store = InMemoryStore()
agent = create_agent(
    model="openai:gpt-4o",
    tools=[save_user_preference, get_user_preference],
    store=store,
)

from typing import Annotated
from langchain.tools import tool
from langgraph.prebuilt import InjectedStore
from langgraph.store import BaseStore
import json
import time


@tool
def save_analysis_result(
    analysis_id: str,
    results: dict,
    store: Annotated[BaseStore, InjectedStore],
) -> str:
    """Save analysis results for later retrieval.

    Args:
        analysis_id: Unique identifier for the analysis
        results: Analysis results as a dictionary

    Returns:
        Confirmation message
    """
    namespace = ("analyses",)
    # Store alongside metadata so readers can judge freshness and format.
    store.put(
        namespace,
        analysis_id,
        {
            "results": results,
            "timestamp": time.time(),
            "version": "1.0",
        },
    )
    return f"Analysis {analysis_id} saved successfully"


@tool
def get_analysis_result(
    analysis_id: str,
    store: Annotated[BaseStore, InjectedStore],
) -> str:
    """Retrieve previously saved analysis results.

    Args:
        analysis_id: Unique identifier for the analysis

    Returns:
        Analysis results or not found message
    """
    namespace = ("analyses",)
    item = store.get(namespace, analysis_id)
    if item:
        data = item.value
        age = time.time() - data["timestamp"]
        return f"Analysis {analysis_id} (saved {age:.0f}s ago): {json.dumps(data['results'])}"
    else:
        return f"No analysis found with ID: {analysis_id}"

from typing import Annotated
from langchain.tools import tool
from langgraph.prebuilt import InjectedStore
from langgraph.store import BaseStore


@tool
def increment_global_counter(
    counter_name: str,
    store: Annotated[BaseStore, InjectedStore],
) -> str:
    """Increment a global counter shared across all conversations.

    Args:
        counter_name: Name of the counter

    Returns:
        New counter value
    """
    namespace = ("global_counters",)
    # Read the previous value (missing counters start at zero), bump, persist.
    entry = store.get(namespace, counter_name)
    new_value = (entry.value.get("count", 0) if entry else 0) + 1
    store.put(namespace, counter_name, {"count": new_value})
    return f"Counter '{counter_name}' is now at {new_value}"


@tool
def get_global_stats(
    store: Annotated[BaseStore, InjectedStore],
) -> str:
    """Get statistics from the global store.

    Returns:
        Statistics summary
    """
    namespace = ("global_counters",)
    # Enumerate every counter stored under the namespace.
    items = store.search(namespace)
    if not items:
        return "No statistics available yet"
    lines = [f"{item.key}: {item.value.get('count', 0)}" for item in items]
    return "Global statistics:\n" + "\n".join(lines)

from typing import Annotated
from langchain.tools import tool
from langgraph.prebuilt import InjectedStore
from langgraph.store import BaseStore
import hashlib
import json
@tool
def cached_api_call(
endpoint: str,
params: dict,
store: Annotated[BaseStore, InjectedStore]
) -> dict:
"""Make an API call with result caching.
Args:
endpoint: API endpoint
params: API parameters
Returns:
API response (may be cached)
"""
# Create cache key
cache_key = hashlib.md5(
f"{endpoint}:{json.dumps(params, sort_keys=True)}".encode()
).hexdigest()
namespace = ("api_cache",)
# Check cache
cached = store.get(namespace, cache_key)
if cached:
cache_age = time.time() - cached.value["timestamp"]
if cache_age < 3600: # 1 hour cache
print(f"Cache hit! Age: {cache_age:.0f}s")
return cached.value["data"]
# Cache miss - make API call
print("Cache miss - fetching from API")
response = requests.get(f"https://api.example.com{endpoint}", params=params)
data = response.json()
# Store in cache
store.put(
namespace,
cache_key,
{
"data": data,
"timestamp": time.time()
}
)
return dataToolRuntime provides comprehensive runtime context during tool execution, including configuration, store access, and execution metadata.
class ToolRuntime:
    """Runtime context during tool execution.

    Provides access to:
    - Configuration (config)
    - Store (persistent storage)
    - Runtime metadata
    """

Purpose: Access runtime configuration and execution context
Available Attributes:
config: Runtime configuration dictionary
store: BaseStore instance for persistence

from typing import Annotated
from langchain.tools import tool, InjectedToolArg
from langgraph.prebuilt import ToolRuntime


@tool
def environment_aware_tool(
    action: str,
    runtime: Annotated[ToolRuntime, InjectedToolArg],
) -> str:
    """Tool that adapts behavior based on runtime configuration.

    Args:
        action: Action to perform

    Returns:
        Action result
    """
    # The injected runtime exposes the invocation's configuration.
    cfg = runtime.config
    environment = cfg.get("environment", "production")
    if cfg.get("debug", False):
        print(f"Debug: Running action '{action}' in {environment} environment")
    # Development runs get looser validation; everything else is strict.
    if environment == "development":
        return f"[DEV] Executed {action} with relaxed validation"
    return f"Executed {action} with full validation"

from typing import Annotated
from langchain.tools import tool, InjectedToolArg
from langgraph.prebuilt import ToolRuntime


@tool
def runtime_store_tool(
    data_key: str,
    data_value: str,
    runtime: Annotated[ToolRuntime, InjectedToolArg],
) -> str:
    """Tool that uses the runtime's store.

    Args:
        data_key: Key for data storage
        data_value: Value to store

    Returns:
        Confirmation message
    """
    # The store is optional on the runtime — guard before writing.
    store = runtime.store
    if not store:
        return "No store available in runtime"
    store.put(("runtime_data",), data_key, {"value": data_value})
    return f"Stored {data_key} in runtime store"

from typing import Annotated
from langchain.tools import tool, InjectedToolArg
from langgraph.prebuilt import ToolRuntime


@tool
def runtime_info_tool(
    runtime: Annotated[ToolRuntime, InjectedToolArg],
) -> str:
    """Get information about the current runtime.

    Returns:
        Runtime information
    """
    # Thread/checkpoint identifiers live under the "configurable" sub-dict.
    configurable = runtime.config.get("configurable", {})
    thread_id = configurable.get("thread_id", "unknown")
    checkpoint_id = configurable.get("checkpoint_id", "none")
    return (
        f"Runtime Info:\n"
        f"- Thread ID: {thread_id}\n"
        f"- Checkpoint ID: {checkpoint_id}\n"
        f"- Store available: {runtime.store is not None}\n"
    )

from typing import Annotated
from langchain.tools import tool, InjectedToolArg, InjectedToolCallId
from langchain.agents import AgentState
from langgraph.prebuilt import InjectedState, InjectedStore, ToolRuntime
from langgraph.store import BaseStore
import time


@tool
def comprehensive_tool(
    query: str,
    state: Annotated[AgentState, InjectedState],
    store: Annotated[BaseStore, InjectedStore],
    call_id: Annotated[str, InjectedToolCallId],
    runtime: Annotated[ToolRuntime, InjectedToolArg],
) -> str:
    """Tool with all injection types.

    Args:
        query: User query to process

    Returns:
        Comprehensive result
    """
    # Log with call ID
    print(f"[{call_id}] Processing query: {query}")
    # Access state
    message_count = len(state["messages"])
    print(f"[{call_id}] Message history length: {message_count}")
    # Record this invocation in the persistent store, keyed by call ID.
    namespace = ("tool_calls",)
    store.put(namespace, call_id, {
        "query": query,
        "message_count": message_count,
        "timestamp": time.time(),
    })
    # Access runtime config
    environment = runtime.config.get("environment", "production")
    result = f"Processed '{query}' in {environment} environment. "
    result += f"Conversation has {message_count} messages. "
    result += f"Call tracked with ID: {call_id}"
    return result

from langchain_core.tools import InjectedToolArg, InjectedToolCallId
from langgraph.prebuilt import InjectedState, InjectedStore, ToolRuntime
from langgraph.store import BaseStore
from typing import Annotated, Any
# NOTE(review): illustrative reference pseudo-code, not runnable as-is —
# T, StateType, and InjectionMarker are placeholders, not importable names.
# Injection annotations
InjectedToolArg = Annotated[T, InjectionMarker]
InjectedToolCallId = Annotated[str, InjectionMarker]
InjectedState = Annotated[StateType, InjectionMarker]
InjectedStore = Annotated[BaseStore, InjectionMarker]
# Runtime context
class ToolRuntime:
    """Runtime context during tool execution."""
    # config: runtime configuration mapping; store: persistent store, or None
    # when the agent was created without one.
    config: dict
    store: BaseStore | None
...

Install with Tessl CLI:
npx tessl i tessl/pypi-langchain@1.2.1