TypeScript framework for building LLM-powered applications with agents, tools, middleware, and model interoperability
—
Best practices: Pending — does it follow best practices?
Impact: Pending — no eval scenarios have been run.
Risk: Pending — the risk profile of this skill.
Pause agent execution for human review and approval of tool calls or model responses.
/**
 * Create human-in-the-loop (HITL) middleware that pauses agent execution
 * so a human can review the pending action (a tool call or a model
 * response) and approve, edit, or reject it before the agent proceeds.
 * @param config - HITL configuration: when to interrupt (`interruptOn`)
 *   and how the pending action is described to the reviewer (`reviewConfig`)
 * @returns Middleware instance to include in an agent's `middleware` array
 */
function humanInTheLoopMiddleware(
config: HumanInTheLoopMiddlewareConfig
): AgentMiddleware;
/**
 * Configuration accepted by `humanInTheLoopMiddleware`.
 */
interface HumanInTheLoopMiddlewareConfig {
  /**
   * When to interrupt for human review; see InterruptOnConfig for the
   * accepted forms.
   */
  interruptOn?: InterruptOnConfig;
  /**
   * Configuration for the review interface (e.g. how the pending action
   * is described to the reviewer).
   */
  reviewConfig?: ReviewConfig;
}
/**
 * Configuration for when to interrupt: a built-in trigger point, or a
 * custom predicate that returns true to interrupt.
 */
type InterruptOnConfig =
  | "tools" // Interrupt before tool execution
  | "tool_results" // Interrupt after tool execution
  | ((state: State, toolCalls?: ToolCall[]) => boolean); // Custom logic
/**
 * Options for the review interface presented to the human reviewer.
 */
interface ReviewConfig {
  /**
   * Factory for generating human-readable descriptions of pending actions.
   */
  descriptionFactory?: DescriptionFactory;
}
/**
 * Builds a description of a pending action from the action and the
 * current agent state; may be synchronous or asynchronous.
 */
type DescriptionFactory = (
  action: Action,
  state: State
) => string | Promise<string>;
/**
* Request for human review: bundles the pending action with the agent
* state and an optional human-readable description.
*/
interface HITLRequest {
/**
* The action (tool call or model response) awaiting review
*/
action: Action;
/**
* Current agent state at the time of the interrupt
*/
state: State;
/**
* Human-readable description of the action (see DescriptionFactory)
*/
description?: string;
}
/**
* Human decision response returned to the agent after review
*/
interface HITLResponse {
/**
* Decision made by the human reviewer (approve, edit, or reject)
*/
decision: Decision;
/**
* Optional free-text feedback or notes accompanying the decision
*/
feedback?: string;
}
/**
* Union of possible review decisions, discriminated by `type`
*/
type Decision = ApproveDecision | EditDecision | RejectDecision;
/**
* Approve the action as-is; execution proceeds unchanged
*/
interface ApproveDecision {
type: "approve";
}
/**
* Edit the action and resubmit it with the given modifications
*/
interface EditDecision {
type: "edit";
// NOTE(review): presumably the keys override the pending tool call's
// argument names (see the edit example, which overrides `to`) — confirm.
modifications: Record<string, any>;
}
/**
* Reject the action, optionally explaining why
*/
interface RejectDecision {
type: "reject";
reason?: string;
}
/**
 * An action the agent wants to perform: either a tool call or a model
 * response, depending on `type`.
 */
interface Action {
  type: "tool_call" | "model_response";
  // NOTE(review): payload shape depends on `type` — the descriptionFactory
  // example destructures `{ name, args }` for tool calls. A discriminated
  // union would be more precise than `any`; kept as-is for API fidelity.
  payload: any;
}
/**
 * An Action wrapped with request metadata for tracking.
 */
interface ActionRequest extends Action {
  requestId: string;
  timestamp: number;
}
/**
 * The tag values a Decision can carry.
 */
type DecisionType = "approve" | "edit" | "reject";
import { createAgent, humanInTheLoopMiddleware, tool } from "langchain";
import { z } from "zod";
// Create a tool that will require human approval before it runs.
const sendEmail = tool(
  async ({ to, subject, body }) => {
    // NOTE(review): `emailService` is assumed to be defined elsewhere in
    // the surrounding application — confirm before copying this snippet.
    await emailService.send({ to, subject, body });
    return `Email sent to ${to}`;
  },
  {
    name: "send_email",
    description: "Send an email",
    schema: z.object({
      to: z.string().email(),
      subject: z.string(),
      body: z.string(),
    }),
  }
);
// Create HITL middleware: interrupt before any tool executes.
const hitl = humanInTheLoopMiddleware({
  interruptOn: "tools", // Interrupt before tool execution
});
// Create the agent with HITL. A checkpointer is required so the run can
// be suspended at the interrupt and resumed on the same thread later.
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [sendEmail],
  middleware: [hitl],
  checkpointer: checkpointer, // Required for interrupts
});
// Start the conversation; the agent will interrupt when the tool is
// about to be called.
const result = await agent.invoke(
  {
    messages: [{ role: "user", content: "Send email to john@example.com" }],
  },
  {
    configurable: { thread_id: "thread-123" },
  }
);
// Resume the same thread after human approval.
const resumed = await agent.invoke(
  {
    messages: [], // Empty to resume
  },
  {
    configurable: {
      thread_id: "thread-123",
      // Human decision goes here
    },
  }
);
import { humanInTheLoopMiddleware } from "langchain";
// Interrupt only for specific tools, using the predicate form of
// interruptOn. (Each snippet on this page is independent; `hitl` is
// redeclared per example.)
const hitl = humanInTheLoopMiddleware({
  interruptOn: (state, toolCalls) => {
    // Review only dangerous operations
    const dangerousTools = ["delete_file", "send_email", "make_payment"];
    return (
      toolCalls?.some((call) => dangerousTools.includes(call.name)) ?? false
    );
  },
});
import { humanInTheLoopMiddleware } from "langchain";
const hitl = humanInTheLoopMiddleware({
  interruptOn: "tools",
  reviewConfig: {
    // Async factory producing the text shown to the human reviewer.
    descriptionFactory: async (action, state) => {
      if (action.type === "tool_call") {
        const { name, args } = action.payload;
        return `Agent wants to call ${name} with arguments: ${JSON.stringify(
          args,
          null,
          2
        )}`;
      }
      return "Action requires review";
    },
  },
});
import { humanInTheLoopMiddleware } from "langchain";
// Review tool results before the agent continues (interrupt *after*
// tool execution, rather than before).
const hitl = humanInTheLoopMiddleware({
  interruptOn: "tool_results",
});
// Use case: Verify API responses before agent acts on them
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [apiTool],
  middleware: [hitl],
  checkpointer: checkpointer,
});
import { humanInTheLoopMiddleware, createAgent } from "langchain";
const hitl = humanInTheLoopMiddleware({
  interruptOn: "tools",
});
const agent = createAgent({
  model: "openai:gpt-4o",
  tools: [tool1, tool2],
  middleware: [hitl],
  checkpointer: checkpointer,
});
// Initial invocation - will interrupt before the tool runs.
await agent.invoke(
  { messages: [{ role: "user", content: "Do something" }] },
  { configurable: { thread_id: "t1" } }
);
// Resume with approval: the pending tool call executes unchanged.
await agent.invoke(
  { messages: [] },
  {
    configurable: {
      thread_id: "t1",
      decision: { type: "approve" },
    },
  }
);
// Or resume with an edit: `modifications` overrides the pending tool
// call's arguments before it executes.
await agent.invoke(
  { messages: [] },
  {
    configurable: {
      thread_id: "t1",
      decision: {
        type: "edit",
        modifications: { to: "different@email.com" },
      },
    },
  }
);
// Or resume with a rejection, optionally explaining why.
await agent.invoke(
  { messages: [] },
  {
    configurable: {
      thread_id: "t1",
      decision: {
        type: "reject",
        reason: "Insufficient information",
      },
    },
  }
);
import {
createAgent,
humanInTheLoopMiddleware,
toolCallLimitMiddleware,
piiMiddleware,
} from "langchain";
// HITL with other safety middleware
const agent = createAgent({
model: "openai:gpt-4o",
tools: [tools],
middleware: [
piiMiddleware({ builtInTypes: ["email", "credit_card"] }), // Check PII first
toolCallLimitMiddleware({ runLimit: 5 }), // Limit tool calls
humanInTheLoopMiddleware({ interruptOn: "tools" }), // Then human review
],
checkpointer: checkpointer,
});interruptOn: "tools" for preventive reviewinterruptOn: "tool_results" for verificationdescriptionFactorydocs