Fully autonomous AI agents are powerful — but not every decision should run without a human in the loop. When actions are irreversible, stakes are high, or confidence is uncertain, pausing for human review is the right engineering choice.
LangGraph's interrupt() function is the cleanest way to build these workflows. It checkpoints the graph state, pauses execution, and waits for human input before resuming — all without blocking a thread or losing work. These six examples cover the most important HITL patterns, from email draft review to medical diagnosis oversight.
For the foundational concept, see the Human-in-the-Loop glossary entry and the LangChain tutorial for graph setup basics.
Example 1: LangGraph interrupt() Approval Gate#
The simplest HITL pattern: an agent drafts an action, interrupts for approval, then either executes or cancels based on the human response. This is the building block for every other example in this guide.
# Third-party dependencies: langgraph for the graph runtime and
# langchain-anthropic for the LLM client.
import os
from typing import TypedDict, Literal
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import interrupt, Command
from langchain_anthropic import ChatAnthropic

# Shared LLM client used by every node in this example.
# NOTE(review): assumes ANTHROPIC_API_KEY is set in the environment — confirm.
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
class AgentState(TypedDict):
    """Shared state threaded through the draft -> review -> execute graph."""

    task: str             # user-supplied task description
    draft_action: str     # LLM-drafted action awaiting human review
    human_decision: str  # "approve" | "reject" | "edit:<new_content>"
    final_action: str     # what was actually executed ("" when cancelled)
    status: str           # "executed" | "executed_with_edits" | "cancelled"
def draft_node(state: AgentState) -> AgentState:
    """Ask the LLM to draft an action for the task held in state."""
    prompt = (
        f"Draft a professional action for this task: {state['task']}\n"
        "Return only the draft content, nothing else."
    )
    reply = llm.invoke(prompt)
    updated = dict(state)
    updated["draft_action"] = reply.content
    return updated
def review_node(state: AgentState) -> AgentState:
    """Interrupt for human approval.

    interrupt() checkpoints the graph and pauses here; the value passed to
    Command(resume=...) on the resuming invoke becomes this call's return
    value, which is recorded as human_decision.
    """
    decision = interrupt({
        "message": "Review the drafted action below and respond with your decision.",
        "draft": state["draft_action"],
        "options": [
            "approve — execute as drafted",
            "reject — cancel the action",
            "edit:<your revised content> — replace draft with your version",
        ],
    })
    return {**state, "human_decision": decision}
def execute_node(state: "AgentState") -> "AgentState":
    """Execute or cancel based on the human decision.

    Decision protocol (matches the options offered by review_node):
      - "approve"        -> execute the draft as-is
      - "edit:<content>" -> execute the reviewer's replacement content
      - anything else    -> cancel; a missing decision defaults to reject
    """
    decision = state.get("human_decision", "reject")
    if decision == "approve":
        final = state["draft_action"]
        status = "executed"
    elif decision.startswith("edit:"):
        # removeprefix avoids the brittle hard-coded slice length of decision[5:]
        final = decision.removeprefix("edit:").strip()
        status = "executed_with_edits"
    else:
        final = ""
        status = "cancelled"
    return {**state, "final_action": final, "status": status}
# Build graph
# Linear topology: draft -> review -> execute -> END.
builder = StateGraph(AgentState)
builder.add_node("draft", draft_node)
builder.add_node("review", review_node)
builder.add_node("execute", execute_node)
builder.set_entry_point("draft")
builder.add_edge("draft", "review")
builder.add_edge("review", "execute")
builder.add_edge("execute", END)
# A checkpointer is required for interrupt(): it persists state at the pause.
# MemorySaver is in-process only; use a durable backend in production.
checkpointer = MemorySaver()
graph = builder.compile(checkpointer=checkpointer)
# Run the graph — it will pause at review_node
# The thread_id keys the checkpoint so this run can be resumed later.
thread = {"configurable": {"thread_id": "task-001"}}
initial_state = {"task": "Send a follow-up email to the Q3 prospects list"}
for event in graph.stream(initial_state, thread, stream_mode="values"):
    if "__interrupt__" in event:
        print("INTERRUPT — Human review required:")
        print(event["__interrupt__"][0].value["draft"])
# Resume with human decision
# Command(resume=...) becomes the return value of the pending interrupt().
result = graph.invoke(
    Command(resume="approve"),
    thread,
)
print(f"Status: {result['status']}")
print(f"Final action: {result['final_action'][:100]}")
The graph pauses at review_node and the interrupt() call serialises the payload into the checkpoint. Any process can resume the graph by calling graph.invoke(Command(resume=&lt;decision&gt;), thread) with the same thread_id. With a durable checkpointer (e.g. PostgreSQL-backed), state survives even server restarts — note that the in-memory MemorySaver used in these examples does not persist across process restarts.
Example 2: Email Draft Review Before Send#
A common source of costly mistakes is sending incorrect or poorly-worded emails. This agent drafts outbound emails and requires human sign-off before the send API is called.
# Third-party dependencies: langgraph runtime plus the Anthropic chat client.
import os
from typing import TypedDict, Optional
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import interrupt, Command
from langchain_anthropic import ChatAnthropic

# Shared LLM client for drafting outbound emails.
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
class EmailState(TypedDict):
    """State for the draft -> review -> send email workflow."""

    recipient_name: str
    recipient_email: str
    context: str  # briefing notes for the email
    subject: str              # drafted by the LLM, possibly edited by reviewer
    body: str                 # drafted by the LLM, possibly edited by reviewer
    approved: bool            # set by human review; send node refuses without it
    send_result: Optional[str]  # "sent" / "cancelled — not approved" / None
def draft_email_node(state: "EmailState") -> "EmailState":
    """Draft the email subject and body with the LLM.

    The model is asked for JSON; if the reply contains no parseable JSON
    object (or the object lacks the expected keys) we fall back to a generic
    subject with the raw reply as the body, so a malformed model response
    never crashes the graph before the human even sees a draft.
    """
    import json
    import re

    response = llm.invoke(
        f"Write a professional email based on this briefing:\n{state['context']}\n\n"
        f"Recipient: {state['recipient_name']} <{state['recipient_email']}>\n\n"
        "Return JSON with keys: subject, body"
    )
    text = response.content
    match = re.search(r'\{.*\}', text, re.DOTALL)
    if match:
        try:
            data = json.loads(match.group())
            return {**state, "subject": data["subject"], "body": data["body"]}
        except (json.JSONDecodeError, KeyError):
            # Regex matched but the JSON was invalid/incomplete — fall through
            # to the plain-text fallback below instead of raising.
            pass
    return {**state, "subject": "Follow-up", "body": text}
def human_review_node(state: "EmailState") -> "EmailState":
    """Pause for human review of the drafted email.

    Accepted replies:
      - "approve"                -> send as drafted
      - "edit_subject:<subject>" -> replace the subject, then send
      - "edit_body:<body>"       -> replace the body, then send
      - anything else            -> approved stays False; the send node cancels
    """
    decision = interrupt({
        "message": "Review the email draft below.",
        "to": f"{state['recipient_name']} <{state['recipient_email']}>",
        "subject": state["subject"],
        "body": state["body"],
        "instructions": (
            "Reply 'approve' to send as-is, "
            "'reject' to cancel, "
            "or 'edit_subject:<new subject>' / 'edit_body:<new body>' to modify."
        ),
    })
    approved = False
    subject = state["subject"]
    body = state["body"]
    if decision == "approve":
        approved = True
    elif decision.startswith("edit_subject:"):
        # removeprefix replaces the brittle hard-coded slices decision[13:]/[10:]
        subject = decision.removeprefix("edit_subject:").strip()
        approved = True
    elif decision.startswith("edit_body:"):
        body = decision.removeprefix("edit_body:").strip()
        approved = True
    return {**state, "approved": approved, "subject": subject, "body": body}
def send_email_node(state: EmailState) -> EmailState:
    """Send the email, but only when the human reviewer approved it."""
    if not state.get("approved"):
        return {**state, "send_result": "cancelled — not approved"}
    # Replace with your real send implementation (SendGrid, SES, etc.)
    preview = state['body'][:100]
    for line in (
        f"[SEND] To: {state['recipient_email']}",
        f"[SEND] Subject: {state['subject']}",
        f"[SEND] Body preview: {preview}...",
    ):
        print(line)
    updated = dict(state)
    updated["send_result"] = "sent"
    return updated
# Wire the three nodes into a linear graph: draft -> review -> send.
builder = StateGraph(EmailState)
builder.add_node("draft", draft_email_node)
builder.add_node("review", human_review_node)
builder.add_node("send", send_email_node)
builder.set_entry_point("draft")
builder.add_edge("draft", "review")
builder.add_edge("review", "send")
builder.add_edge("send", END)
# MemorySaver keeps checkpoints in-process only; swap in a durable
# checkpointer for production so pending reviews survive restarts.
graph = builder.compile(checkpointer=MemorySaver())

# thread_id identifies the checkpointed run for pause/resume.
thread = {"configurable": {"thread_id": "email-001"}}
initial = {
    "recipient_name": "Sarah Chen",
    "recipient_email": "sarah@prospect.com",
    "context": "Follow up on Q3 demo. She asked about pricing. Offer a 15% discount for annual.",
    "subject": "", "body": "", "approved": False, "send_result": None,
}
# First run pauses at the review node and surfaces the interrupt payload.
for event in graph.stream(initial, thread, stream_mode="values"):
    if "__interrupt__" in event:
        payload = event["__interrupt__"][0].value
        print(f"\nDraft ready for review:")
        print(f"Subject: {payload['subject']}")
        print(f"Body:\n{payload['body']}\n")
# Human approves
final = graph.invoke(Command(resume="approve"), thread)
print(f"Result: {final['send_result']}")
The email is never sent unless approved=True reaches the send node. The interrupt() payload includes the full draft so reviewers can read and decide without navigating to another system.
Example 3: Financial Transaction Approval Workflow#
Financial operations demand multi-level authorisation. This agent analyses a transaction, determines the required approval tier from amount and risk, and routes to the correct approver via interrupt().
from typing import TypedDict, Literal, List
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import interrupt, Command
from langchain_anthropic import ChatAnthropic
import json

llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")

# Approval ladder, ordered lowest to highest authority. A transaction is
# routed to the first tier whose amount AND risk ceilings both cover it;
# "cfo" has unbounded ceilings so escalation always terminates there.
APPROVAL_TIERS = {
    "auto": {"max_amount": 500, "max_risk": 0.15},
    "team_lead": {"max_amount": 5_000, "max_risk": 0.35},
    "manager": {"max_amount": 25_000, "max_risk": 0.55},
    "director": {"max_amount": 100_000, "max_risk": 0.75},
    "cfo": {"max_amount": float("inf"), "max_risk": 1.0},
}
class TxnState(TypedDict):
    """State for the analyse -> approval -> execute transaction flow."""

    transaction: dict        # raw transaction payload (id, type, amount, ...)
    risk_score: float        # LLM-estimated risk, expected in [0, 1]
    risk_factors: List[str]  # LLM-cited reasons behind the score
    required_tier: str       # key into APPROVAL_TIERS
    recommendation: str      # LLM suggestion: approve | investigate | reject
    human_decision: str      # "approve" | "reject" | "auto_approved"
    executed: bool           # True once the transaction was carried out
def analyse_node(state: "TxnState") -> "TxnState":
    """Score transaction risk with the LLM and pick the approval tier.

    The tier is the first entry in APPROVAL_TIERS (ordered lowest to highest
    authority) whose amount and risk ceilings both cover this transaction;
    "cfo" has unbounded ceilings, so it is the fallback of last resort.
    A malformed model reply degrades to conservative defaults (risk 0.5,
    recommendation "investigate") instead of raising.
    """
    import re

    txn = state["transaction"]
    response = llm.invoke(
        f"Analyse this financial transaction:\n{json.dumps(txn, indent=2)}\n\n"
        "Return JSON: {\"risk_score\": float, \"risk_factors\": [str], "
        "\"recommendation\": \"approve|investigate|reject\"}"
    )
    text = response.content
    match = re.search(r'\{.*\}', text, re.DOTALL)
    try:
        data = json.loads(match.group()) if match else {}
    except json.JSONDecodeError:
        data = {}  # regex matched but JSON was invalid — use defaults
    risk = data.get("risk_score", 0.5)
    amount = txn.get("amount", 0)
    # First tier able to absorb both the amount and the risk score.
    tier = next(
        (t for t, limits in APPROVAL_TIERS.items()
         if amount <= limits["max_amount"] and risk <= limits["max_risk"]),
        "cfo",
    )
    return {
        **state,
        "risk_score": risk,
        "risk_factors": data.get("risk_factors", []),
        "recommendation": data.get("recommendation", "investigate"),
        "required_tier": tier,
    }
def approval_node(state: TxnState) -> TxnState:
    """Auto-approve "auto"-tier transactions; interrupt for everyone else.

    The payload bundles the AI analysis so the approver can decide from
    their review tool without opening another system.
    """
    if state["required_tier"] == "auto":
        # Below every human threshold: record the decision and skip the pause.
        return {**state, "human_decision": "auto_approved"}
    decision = interrupt({
        "message": f"Transaction requires {state['required_tier'].upper()} approval.",
        "transaction": state["transaction"],
        "ai_recommendation": state["recommendation"],
        "risk_score": state["risk_score"],
        "risk_factors": state["risk_factors"],
        "instructions": "Reply 'approve' or 'reject'.",
    })
    return {**state, "human_decision": decision}
def execute_node(state: TxnState) -> TxnState:
    """Carry out the transaction when an approving decision was recorded."""
    txn = state["transaction"]
    approved = state["human_decision"] in ("approve", "auto_approved")
    if not approved:
        print(f"Transaction {txn.get('id')} rejected.")
    else:
        print(f"Executing transaction {txn.get('id')}: "
              f"${txn.get('amount')}")
    return {**state, "executed": approved}
# analyse -> approval -> execute; approval only interrupts above "auto" tier.
builder = StateGraph(TxnState)
builder.add_node("analyse", analyse_node)
builder.add_node("approval", approval_node)
builder.add_node("execute", execute_node)
builder.set_entry_point("analyse")
builder.add_edge("analyse", "approval")
builder.add_edge("approval", "execute")
builder.add_edge("execute", END)
graph = builder.compile(checkpointer=MemorySaver())

thread = {"configurable": {"thread_id": "txn-002"}}
# $12,500 refund: above the $500 auto ceiling, so human approval is required.
state = {
    "transaction": {"id": "TXN-9821", "type": "refund", "amount": 12500.00,
                    "customer_id": "CUST-441", "reason": "contract cancellation"},
    "risk_score": 0.0, "risk_factors": [], "required_tier": "",
    "recommendation": "", "human_decision": "", "executed": False,
}
for event in graph.stream(state, thread, stream_mode="values"):
    if "__interrupt__" in event:
        payload = event["__interrupt__"][0].value
        print(f"Approval required ({payload['message']})")
        print(f"Risk: {payload['risk_score']:.2f} — {payload['risk_factors']}")
# Resume with the approver's decision; the same thread_id finds the checkpoint.
final = graph.invoke(Command(resume="approve"), thread)
print(f"Executed: {final['executed']}")
The tier escalation logic runs before the interrupt() so the correct approver channel receives the notification. Integrate the interrupt resume path with a Slack bot or internal admin UI to close the approval loop without requiring approvers to run Python.
Example 4: Content Moderation Pipeline#
User-generated content at scale requires a blend of automated filtering and human judgement. This agent pre-screens content with the LLM, auto-approves or auto-rejects clear cases, and routes borderline content to a human moderator via interrupt().
from typing import TypedDict, List, Optional, Literal
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import interrupt, Command
from langchain_anthropic import ChatAnthropic
import json

# Haiku model for screening — presumably chosen for cost/latency at
# moderation volume; the graph does not depend on the specific model.
llm = ChatAnthropic(model="claude-3-5-haiku-20241022")
class ModerationState(TypedDict):
    """State for the screen -> (auto_resolve | human_review) pipeline."""

    content_id: str
    content: str
    content_type: str             # e.g. "forum_post"
    auto_decision: Optional[str]  # "approve" | "reject" | "needs_human_review"
    violation_flags: List[str]    # policy categories the screener flagged
    severity: str # "none" | "mild" | "severe"
    human_decision: Optional[str]  # moderator reply, when routed to a human
    final_decision: str # "approved" | "rejected" | "flagged"
    moderator_note: str           # audit note recording how it was decided
def screen_node(state: "ModerationState") -> "ModerationState":
    """Pre-screen content with the LLM and record flags/severity/decision.

    Falls back to "needs_human_review" whenever the model reply cannot be
    parsed, so a parsing failure escalates to a human instead of crashing
    (fail-safe for a moderation pipeline).
    """
    import re

    response = llm.invoke(
        f"Moderate this {state['content_type']} for policy violations:\n\n"
        f"\"{state['content']}\"\n\n"
        "Check for: hate speech, harassment, misinformation, spam, NSFW, self-harm.\n"
        "Return JSON: {\"violation_flags\": [str], \"severity\": \"none|mild|severe\", "
        "\"auto_decision\": \"approve|reject|needs_human_review\", \"reasoning\": str}"
    )
    text = response.content
    match = re.search(r'\{.*\}', text, re.DOTALL)
    try:
        data = json.loads(match.group()) if match else {}
    except json.JSONDecodeError:
        data = {}  # malformed reply — defaults below route to human review
    return {
        **state,
        "violation_flags": data.get("violation_flags", []),
        "severity": data.get("severity", "none"),
        "auto_decision": data.get("auto_decision", "needs_human_review"),
    }
def route_node(state: ModerationState) -> Literal["auto_resolve", "human_review"]:
    """Conditional-edge selector: clear-cut screener decisions skip the human queue."""
    clear_cut = state["auto_decision"] in ("approve", "reject")
    return "auto_resolve" if clear_cut else "human_review"
def auto_resolve_node(state: ModerationState) -> ModerationState:
    """Finalize content the screener judged unambiguously, with an audit note."""
    if state["auto_decision"] == "approve":
        verdict = "approved"
    else:
        verdict = "rejected"
    return {**state, "final_decision": verdict, "moderator_note": "auto-moderated"}
def human_review_node(state: "ModerationState") -> "ModerationState":
    """Interrupt for a human moderator on borderline content.

    Accepted replies: 'approve', 'reject', or 'flag:<note>'. Any other
    reply is treated as a rejection so unrecognized input never results
    in content being auto-published.
    """
    decision = interrupt({
        "message": "Borderline content requires human moderation.",
        "content_id": state["content_id"],
        "content": state["content"],
        "ai_flags": state["violation_flags"],
        "severity": state["severity"],
        "instructions": (
            "Reply 'approve', 'reject', or 'flag:<note>' to flag with a note."
        ),
    })
    if decision == "approve":
        return {**state, "human_decision": "approve", "final_decision": "approved",
                "moderator_note": "human approved"}
    if decision == "reject":
        return {**state, "human_decision": "reject", "final_decision": "rejected",
                "moderator_note": "human rejected"}
    if decision.startswith("flag:"):
        # removeprefix avoids the brittle hard-coded slice decision[5:]
        note = decision.removeprefix("flag:").strip()
        return {**state, "human_decision": "flag", "final_decision": "flagged",
                "moderator_note": note}
    return {**state, "final_decision": "rejected", "moderator_note": "invalid response — rejected"}
# screen fans out via a conditional edge: clear cases auto-resolve,
# borderline cases go to the human review node (which interrupts).
builder = StateGraph(ModerationState)
builder.add_node("screen", screen_node)
builder.add_node("auto_resolve", auto_resolve_node)
builder.add_node("human_review", human_review_node)
builder.set_entry_point("screen")
builder.add_conditional_edges("screen", route_node,
    {"auto_resolve": "auto_resolve", "human_review": "human_review"})
builder.add_edge("auto_resolve", END)
builder.add_edge("human_review", END)
graph = builder.compile(checkpointer=MemorySaver())

thread = {"configurable": {"thread_id": "mod-007"}}
# Harsh product criticism: negative but plausibly policy-compliant — the
# kind of borderline case the screener may route to a human.
state = {
    "content_id": "POST-4492", "content_type": "forum_post",
    "content": "This product is completely useless garbage, total waste of money.",
    "auto_decision": None, "violation_flags": [], "severity": "",
    "human_decision": None, "final_decision": "", "moderator_note": "",
}
for event in graph.stream(state, thread, stream_mode="values"):
    if "__interrupt__" in event:
        payload = event["__interrupt__"][0].value
        print(f"Moderation required for {payload['content_id']}")
        print(f"AI flags: {payload['ai_flags']}")
# NOTE(review): this resume assumes the screener routed to human_review;
# confirm desired behavior when the run already auto-resolved.
final = graph.invoke(Command(resume="approve"), thread)
print(f"Final decision: {final['final_decision']} — {final['moderator_note']}")
The conditional edge routes clearly safe or clearly violating content through the fast automated path and only sends genuinely borderline cases to the human queue. This keeps moderator workload manageable while ensuring edge cases receive human judgement.
Example 5: Medical Diagnosis Review Agent#
In healthcare applications, AI can accelerate triage and draft assessment summaries, but clinical decisions must remain with licensed practitioners. This pattern shows how to use interrupt() to enforce mandatory clinician review before any patient-facing action is taken.
# Third-party dependencies: langgraph runtime plus the Anthropic chat client.
from typing import TypedDict, List, Optional
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import interrupt, Command
from langchain_anthropic import ChatAnthropic
import json

# Shared LLM client used for the preliminary (non-final) triage assessment.
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
class ClinicalState(TypedDict):
    """State for the triage -> mandatory clinician review workflow."""

    patient_id: str
    symptoms: List[str]
    vitals: dict                        # e.g. {"BP": ..., "HR": ..., "SpO2": ...}
    medical_history: List[str]
    ai_assessment: str                  # preliminary AI summary — never final
    differential_diagnosis: List[dict]  # [{"condition", "probability", "reasoning"}]
    urgency: str # "routine" | "urgent" | "emergent"
    clinician_review: dict              # parsed JSON reply from the clinician
    approved_plan: str                  # plan text approved by the clinician
    notes: str                          # clinician's modifications/notes
def triage_node(state: "ClinicalState") -> "ClinicalState":
    """Generate a preliminary AI assessment for mandatory clinician review.

    Unparseable model output degrades to empty/"routine" defaults rather
    than raising — the clinician review step downstream always happens
    regardless, so nothing clinical rides on this parse succeeding.
    """
    import re

    response = llm.invoke(
        "You are an AI clinical decision support tool. "
        "Generate a preliminary assessment — this MUST be reviewed by a clinician.\n\n"
        f"Symptoms: {state['symptoms']}\n"
        f"Vitals: {state['vitals']}\n"
        f"History: {state['medical_history']}\n\n"
        "Return JSON: {\"assessment\": str, \"differential_diagnosis\": "
        "[{\"condition\": str, \"probability\": str, \"reasoning\": str}], "
        "\"urgency\": \"routine|urgent|emergent\", \"suggested_workup\": [str]}"
    )
    text = response.content
    match = re.search(r'\{.*\}', text, re.DOTALL)
    try:
        data = json.loads(match.group()) if match else {}
    except json.JSONDecodeError:
        data = {}  # malformed reply — fall back to empty assessment
    return {
        **state,
        "ai_assessment": data.get("assessment", ""),
        "differential_diagnosis": data.get("differential_diagnosis", []),
        "urgency": data.get("urgency", "routine"),
    }
def clinician_review_node(state: "ClinicalState") -> "ClinicalState":
    """MANDATORY clinician review — no bypass path exists.

    The resume value is expected to be a JSON string (see "instructions"
    in the payload). Because this is human-typed input, the parse is
    guarded: anything unparseable yields an empty review dict and an
    empty approved_plan, which downstream consumers must treat as
    "no plan approved" rather than crashing the case thread.
    """
    import re

    review = interrupt({
        "message": "CLINICIAN REVIEW REQUIRED — AI assessment below is preliminary only.",
        "patient_id": state["patient_id"],
        "ai_assessment": state["ai_assessment"],
        "differential": state["differential_diagnosis"],
        "urgency": state["urgency"],
        "disclaimer": (
            "This AI output is a decision-support tool only. "
            "Clinical judgement of a licensed practitioner is required before any action."
        ),
        "instructions": (
            "Reply JSON: {\"approved_plan\": str, \"modifications\": str, "
            "\"clinician_id\": str, \"override_urgency\": str|null}"
        ),
    })
    plan_data = {}
    if isinstance(review, str):
        match = re.search(r'\{.*\}', review, re.DOTALL)
        if match:
            try:
                plan_data = json.loads(match.group())
            except json.JSONDecodeError:
                plan_data = {}  # malformed clinician reply: record no approval
    return {
        **state,
        "clinician_review": plan_data,
        "approved_plan": plan_data.get("approved_plan", ""),
        "notes": plan_data.get("modifications", ""),
    }
# Two-node graph with no conditional edges: triage ALWAYS flows into
# clinician_review, so the interrupt cannot be bypassed by any state value.
builder = StateGraph(ClinicalState)
builder.add_node("triage", triage_node)
builder.add_node("clinician_review", clinician_review_node)
builder.set_entry_point("triage")
builder.add_edge("triage", "clinician_review")
builder.add_edge("clinician_review", END)
graph = builder.compile(checkpointer=MemorySaver())

thread = {"configurable": {"thread_id": "case-1102"}}
state = {
    "patient_id": "PT-1102",
    "symptoms": ["chest tightness", "shortness of breath", "diaphoresis"],
    "vitals": {"BP": "158/94", "HR": 102, "SpO2": 96, "Temp": 37.2},
    "medical_history": ["hypertension", "type 2 diabetes"],
    "ai_assessment": "", "differential_diagnosis": [], "urgency": "",
    "clinician_review": {}, "approved_plan": "", "notes": "",
}
# First run pauses at clinician_review and surfaces the review payload.
for event in graph.stream(state, thread, stream_mode="values"):
    if "__interrupt__" in event:
        payload = event["__interrupt__"][0].value
        print(f"\nClinician review required for {payload['patient_id']}")
        print(f"AI urgency assessment: {payload['urgency'].upper()}")
        print(f"Top differential: {payload['differential'][0] if payload['differential'] else 'N/A'}")
# Simulate clinician response
# In production this JSON would come from the clinician's review UI.
clinician_input = json.dumps({
    "approved_plan": "ECG, troponin, CXR stat. Cardiology consult.",
    "modifications": "Upgraded urgency to emergent based on symptom cluster.",
    "clinician_id": "DR-SMITH",
    "override_urgency": "emergent"
})
final = graph.invoke(Command(resume=clinician_input), thread)
print(f"\nApproved plan: {final['approved_plan']}")
print(f"Notes: {final['notes']}")
There is deliberately no conditional routing away from clinician_review_node — the graph always interrupts and always requires a clinician response. This hard constraint is enforced structurally by the graph topology, not by application logic that could be bypassed.
Example 6: Code Deployment Approval Agent#
Deploying code to production is an irreversible action with potentially wide impact. This agent prepares a deployment summary, runs pre-flight checks, and requires explicit human sign-off before triggering the deploy pipeline.
# subprocess is only referenced by the commented-out kubectl call in
# deploy_node; it is kept for when the real rollout command is enabled.
import subprocess
from typing import TypedDict, List, Optional
from langgraph.graph import StateGraph, END
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import interrupt, Command
from langchain_anthropic import ChatAnthropic
import json

# Shared LLM client used for diff analysis and risk assessment.
llm = ChatAnthropic(model="claude-3-5-sonnet-20241022")
class DeployState(TypedDict):
    """State for the analyse_diff -> preflight -> approval -> deploy flow."""

    service: str
    version: str
    environment: str # "staging" | "production"
    diff_summary: str            # LLM summary of the release diff
    preflight_checks: List[dict]  # [{"name", "status", "detail"}]
    risk_assessment: str         # LLM prose assessment
    risk_level: str # "low" | "medium" | "high"
    approver_decision: str       # "deploy" | "reject" | "hold:<reason>"
    deploy_result: Optional[str]  # outcome string; None before the deploy node
def analyse_diff_node(state: "DeployState") -> "DeployState":
    """Summarise the release diff and obtain an LLM risk assessment.

    Unparseable model output falls back to the raw diff text and a
    conservative "medium" risk level instead of raising, so the human
    approval gate is still reached with usable context.
    """
    import re

    # In a real pipeline, fetch the actual diff from your VCS API
    mock_diff = f"Service: {state['service']} v{state['version']}\n+ 3 new API endpoints\n~ auth middleware updated\n- legacy rate limiter removed"
    response = llm.invoke(
        f"Analyse this deployment diff and assess risk:\n{mock_diff}\n\n"
        f"Target environment: {state['environment']}\n\n"
        "Return JSON: {\"diff_summary\": str, \"risk_assessment\": str, "
        "\"risk_level\": \"low|medium|high\", "
        "\"concerns\": [str], \"rollback_complexity\": str}"
    )
    text = response.content
    match = re.search(r'\{.*\}', text, re.DOTALL)
    try:
        data = json.loads(match.group()) if match else {}
    except json.JSONDecodeError:
        data = {}  # malformed reply — defaults below keep the gate informative
    return {
        **state,
        "diff_summary": data.get("diff_summary", mock_diff),
        "risk_assessment": data.get("risk_assessment", ""),
        "risk_level": data.get("risk_level", "medium"),
    }
def preflight_node(state: DeployState) -> DeployState:
    """Attach canned pre-flight check results (stub for a real CI/CD query)."""
    results = []
    for name, status, detail in (
        ("unit_tests", "pass", "847 tests, 0 failures"),
        ("lint", "pass", "no violations"),
        ("staging_smoke", "pass", "all endpoints healthy"),
        ("coverage", "warn", "82% (threshold: 85%)"),
    ):
        results.append({"name": name, "status": status, "detail": detail})
    return {**state, "preflight_checks": results}
def approval_node(state: DeployState) -> DeployState:
    """Pause for explicit human sign-off before any deploy runs."""
    status_labels = {"pass": "OK", "warn": "WARN"}
    lines = []
    for check in state["preflight_checks"]:
        label = status_labels.get(check['status'], "FAIL")
        lines.append(f"  {label} {check['name']}: {check['detail']}")
    checks_display = "\n".join(lines)
    decision = interrupt({
        "message": f"Deployment approval required for {state['environment'].upper()}.",
        "service": state["service"],
        "version": state["version"],
        "diff_summary": state["diff_summary"],
        "risk_level": state["risk_level"],
        "risk_assessment": state["risk_assessment"],
        "preflight_checks": checks_display,
        "instructions": "Reply 'deploy', 'reject', or 'hold:<reason>'.",
    })
    return {**state, "approver_decision": decision}
def deploy_node(state: "DeployState") -> "DeployState":
    """Trigger, hold, or cancel the deploy based on the approver's reply.

    Decision protocol (see approval_node): 'deploy' runs the rollout,
    'hold:<reason>' parks it with the reason recorded, and anything else
    (including a missing decision) cancels the deployment.
    """
    decision = state.get("approver_decision", "reject")
    if decision == "deploy":
        print(f"Deploying {state['service']} v{state['version']} to {state['environment']}...")
        # subprocess.run(["kubectl", "rollout", ...], check=True)
        return {**state, "deploy_result": "success"}
    elif decision.startswith("hold:"):
        # removeprefix avoids the brittle hard-coded slice decision[5:]
        reason = decision.removeprefix("hold:").strip()
        return {**state, "deploy_result": f"held — {reason}"}
    else:
        return {**state, "deploy_result": "rejected — deployment cancelled"}
# Linear pipeline: analyse_diff -> preflight -> approval (interrupt) -> deploy.
builder = StateGraph(DeployState)
builder.add_node("analyse_diff", analyse_diff_node)
builder.add_node("preflight", preflight_node)
builder.add_node("approval", approval_node)
builder.add_node("deploy", deploy_node)
builder.set_entry_point("analyse_diff")
builder.add_edge("analyse_diff", "preflight")
builder.add_edge("preflight", "approval")
builder.add_edge("approval", "deploy")
builder.add_edge("deploy", END)
graph = builder.compile(checkpointer=MemorySaver())

# thread_id doubles as a human-readable tag for the release being approved.
thread = {"configurable": {"thread_id": "deploy-v2.4.1"}}
state = {
    "service": "api-gateway", "version": "2.4.1", "environment": "production",
    "diff_summary": "", "preflight_checks": [], "risk_assessment": "",
    "risk_level": "", "approver_decision": "", "deploy_result": None,
}
# First run pauses at the approval node with the full review payload.
for event in graph.stream(state, thread, stream_mode="values"):
    if "__interrupt__" in event:
        payload = event["__interrupt__"][0].value
        print(f"\nApproval required: {payload['message']}")
        print(f"Risk level: {payload['risk_level'].upper()}")
        print(f"Pre-flight:\n{payload['preflight_checks']}")
# Resume with the engineer's sign-off; 'deploy' triggers the rollout branch.
final = graph.invoke(Command(resume="deploy"), thread)
print(f"\nDeploy result: {final['deploy_result']}")
The coverage warning surfaces in the approval payload so the engineer can make an informed decision about whether that specific risk is acceptable for this release. Because the decision and all context are checkpointed, the approval serves as a natural audit record.
Designing the Right Level of Oversight#
Match the HITL pattern to the risk profile of the action:
| Risk Level | Action Type | Pattern |
|---|---|---|
| Low, reversible, high confidence | Routine notifications | Auto-execute with logging |
| Medium, partially reversible | Marketing emails, minor refunds | Async Slack approval with timeout |
| High, irreversible | Financial transactions, deployments | Synchronous interrupt() — no bypass |
| Regulated domain | Medical, legal, financial advice | Mandatory review enforced by graph topology |
The Human-in-the-Loop glossary entry covers the full theoretical framework. The LangChain tutorial shows how to wire LangGraph checkpointers to PostgreSQL for durable state in production.
Related Resources#
- LangChain Agent Examples — Foundational agent patterns with LangGraph
- Agentic RAG Examples — Add retrieval to your HITL workflows
- Browser Use Agent Examples — Apply HITL to browser automation
- AI Agent E-Commerce Examples — HITL for order and pricing decisions
- Computer Use Agent Examples — Oversight for desktop automation
- AI Agent Safety Patterns — Broader safety architecture beyond HITL
- Integrations Overview — Connect approval workflows to Slack, email, and ticketing systems
The FAQ section renders from the frontmatter faq array above.