# langgraph-agents
Multi-agent systems with LangGraph - supervisor/swarm/handoff/router patterns, state coordination, Deep Agents, guardrails, testing, observability, deployment. Use when building multi-agent workflows, coordinating agents, or need cost-optimized orchestration. Uses Claude, DeepSeek, Gemini (no OpenAI).
npx skill4agent add scientiacapital/skills langgraph-agents

from typing import TypedDict, Annotated
from langgraph.graph import add_messages
class AgentState(TypedDict, total=False):
messages: Annotated[list, add_messages] # Auto-merge
next_agent: str # For handoffs

| Pattern | When | Agents |
|---|---|---|
| Supervisor | Clear hierarchy | 3-10 |
| Swarm | Peer collaboration | 5-15 |
| Handoff | Sequential pipeline | 2-5 |
| Router | Classify and dispatch | 2-10 |
| Master | Learning systems | 10-30+ |
`@entrypoint` / `@task`

pip install langchain langgraph langgraph-supervisor langgraph-swarm langchain-mcp-adapters

`Annotated[..., add_messages]`

| Pattern | Use When | Complexity | Reference |
|---|---|---|---|
| Supervisor | Clear hierarchy, centralized routing | Low-Medium | |
| Swarm | Peer collaboration, dynamic handoffs | Medium | |
| Handoff | Sequential pipelines, escalation | Low | |
| Router | Classify-and-dispatch, fan-out | Low | |
| Skills | Progressive disclosure, on-demand | Low | |
| Master | Learning systems, complex workflows | High | |
from typing import TypedDict, Annotated, Dict, Any
from langchain_core.messages import BaseMessage
from langgraph.graph import add_messages
class AgentState(TypedDict, total=False):
messages: Annotated[list[BaseMessage], add_messages] # Auto-merge
agent_type: str
metadata: Dict[str, Any]
next_agent: str # For handoffs

See reference/state-schemas.md

# Use lang-core for unified provider access (NO OPENAI)
from lang_core.providers import get_llm_for_task, LLMPriority
llm_cheap = get_llm_for_task(priority=LLMPriority.COST) # DeepSeek
llm_smart = get_llm_for_task(priority=LLMPriority.QUALITY) # Claude
llm_fast = get_llm_for_task(priority=LLMPriority.SPEED) # Cerebras
llm_local = get_llm_for_task(priority=LLMPriority.LOCAL) # Ollama

See reference/base-agent-architecture.md and reference/cost-optimization.md

from langgraph_supervisor import create_supervisor # pip install langgraph-supervisor
from langgraph.prebuilt import create_react_agent
research_agent = create_react_agent(model, tools=research_tools, prompt="Research specialist")
writer_agent = create_react_agent(model, tools=writer_tools, prompt="Content writer")
supervisor = create_supervisor(agents=[research_agent, writer_agent], model=model)
result = supervisor.invoke({"messages": [("user", "Write article about LangGraph")]})

from langgraph_swarm import create_swarm, create_handoff_tool # pip install langgraph-swarm
handoff_to_bob = create_handoff_tool(agent_name="Bob", description="Transfer for Python tasks")
alice = create_react_agent(model, tools=[query_db, handoff_to_bob], prompt="SQL expert")
bob = create_react_agent(model, tools=[execute_code], prompt="Python expert")
swarm = create_swarm(agents=[alice, bob], default_active_agent="Alice")

from langgraph.func import entrypoint, task
from langgraph.checkpoint.memory import InMemorySaver
@task
def research(query: str) -> str:
return f"Results for: {query}"
@entrypoint(checkpointer=InMemorySaver())
def workflow(query: str) -> dict:
result = research(query).result()
return {"output": result}

See reference/functional-api.md

from langchain_mcp_adapters.client import MultiServerMCPClient
async with MultiServerMCPClient(
{"tools": {"transport": "stdio", "command": "python", "args": ["./mcp_server.py"]}}
) as client:
tools = await client.get_tools()
agent = create_react_agent(model, tools=tools)

See reference/mcp-integration.md

from deep_agents import create_deep_agent
from deep_agents.backends import CompositeBackend, StateBackend, StoreBackend
backend = CompositeBackend({
"/workspace/": StateBackend(), # Ephemeral
"/memories/": StoreBackend() # Persistent
})
agent = create_deep_agent(
model=ChatAnthropic(model="claude-opus-4-6"),
backend=backend,
interrupt_on=["deploy", "delete"],
skills_dirs=["./skills/"]
)

See reference/deep-agents.md

# Recursion limit prevents runaway agents (default: 25 steps)
config = {"recursion_limit": 25, "configurable": {"thread_id": "user-123"}}
result = graph.invoke(input_data, config=config)
# Add guardrail nodes for PII, safety checks, HITL — see reference/guardrails.md

Reference files:
- reference/state-schemas.md
- reference/base-agent-architecture.md
- reference/tools-organization.md
- reference/orchestration-patterns.md
- reference/context-engineering.md
- reference/cost-optimization.md
- reference/functional-api.md
- reference/mcp-integration.md
- reference/deep-agents.md
- reference/streaming-patterns.md
- reference/guardrails.md
- reference/testing-patterns.md
- reference/observability.md
- reference/deployment-patterns.md

| Issue | Solution |
|---|---|
| State not updating | Add `Annotated[..., add_messages]` reducer to the state field |
| Infinite loops | Add termination condition or set `recursion_limit` |
| High costs | Route simple tasks to cheaper models; use fallback chains |
| Context loss | Use checkpointers or memory systems |
| Wrong imports | Use `langgraph_supervisor` / `langgraph_swarm` package imports (TODO confirm original cell) |
| Wrong imports | Import `add_messages` from `langgraph.graph` (TODO confirm original cell) |
| MCP API mismatch | Use `langchain-mcp-adapters` `MultiServerMCPClient` |
| PII leakage | Add PII redaction guard node (see reference/guardrails.md) |
| No observability | Set up tracing via the `@traced_agent` decorator (see reference/observability.md) |
| Fragile agents | Add guardrails: call limits, budget tripwires, structured output |
`@traced_agent` and `get_llm_for_task(priority=...)`

from lang_core import traced_agent, get_llm_for_task, LLMPriority
from lang_core.middleware import budget_enforcement_middleware
@traced_agent("QualificationAgent", tags=["sales"])
async def run_qualification(data):
llm = get_llm_for_task(priority=LLMPriority.SPEED)
# ... agent logic

Write outcome to `~/.claude/skill-analytics/last-outcome-langgraph-agents.json`:

{"ts":"[UTC ISO8601]","skill":"langgraph-agents","version":"2.0.0","variant":"default",
"status":"[success|partial|error]","runtime_ms":[estimated ms from start],
"metrics":{"agents_created":[n],"nodes_configured":[n],"graphs_built":[n]},
"error":null,"session_id":"[YYYY-MM-DD]"}