deepagents-implementation
Compare original and translation side by side
🇺🇸 Original (English)
🇨🇳 Translation (Chinese)
Deep Agents Implementation
Deep Agents 实现指南
Core Concepts
核心概念
Deep Agents provides a batteries-included agent harness built on LangGraph:
- create_deep_agent: Factory function that creates a configured agent
- Middleware: Injected capabilities (filesystem, todos, subagents, summarization)
- Backends: Pluggable file storage (state, filesystem, store, composite)
- Subagents: Isolated task execution via the task tool
The agent returned is a compiled LangGraph StateGraph, compatible with streaming, checkpointing, and LangGraph Studio.
Deep Agents 提供了一个基于LangGraph的、开箱即用的Agent框架:
- create_deep_agent:用于创建已配置Agent的工厂函数
- 中间件(Middleware):可注入的扩展能力(文件系统、待办事项、子Agent、摘要生成)
- 后端(Backends):可插拔的文件存储方案(状态存储、文件系统存储、持久化存储、复合存储)
- 子Agent(Subagents):通过task工具实现的独立任务执行单元
返回的Agent是已编译的LangGraph StateGraph,支持流式传输、检查点和LangGraph Studio。
Essential Imports
必要导入
python
python
Core
Core
from deepagents import create_deep_agent
from deepagents import create_deep_agent
Subagents
Subagents
from deepagents import CompiledSubAgent
from deepagents import CompiledSubAgent
Backends
Backends
from deepagents.backends import (
StateBackend, # Ephemeral (default)
FilesystemBackend, # Real disk
StoreBackend, # Persistent cross-thread
CompositeBackend, # Route paths to backends
)
from deepagents.backends import (
StateBackend, # 临时存储(默认)
FilesystemBackend, # 本地磁盘存储
StoreBackend, # 跨线程持久化存储
CompositeBackend, # 多后端路由
)
LangGraph (for checkpointing, store, streaming)
LangGraph(用于检查点、存储、流式传输)
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.checkpoint.postgres import PostgresSaver
from langgraph.store.memory import InMemoryStore
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.checkpoint.postgres import PostgresSaver
from langgraph.store.memory import InMemoryStore
LangChain (for custom models, tools)
LangChain(用于自定义模型、工具)
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
Basic Usage
基础用法
Minimal Agent
最简Agent
python
from deepagents import create_deep_agent
python
from deepagents import create_deep_agent
Uses Claude Sonnet 4 by default
默认使用Claude Sonnet 4
agent = create_deep_agent()
result = agent.invoke({"messages": [{"role": "user", "content": "Hello!"}]})
agent = create_deep_agent()
result = agent.invoke({"messages": [{"role": "user", "content": "Hello!"}]})
With Custom Tools
自定义工具
python
from langchain_core.tools import tool
from deepagents import create_deep_agent
@tool
def web_search(query: str) -> str:
"""Search the web for information."""
return tavily_client.search(query)
agent = create_deep_agent(
tools=[web_search],
system_prompt="You are a research assistant. Search the web to answer questions.",
)
result = agent.invoke({"messages": [{"role": "user", "content": "What is LangGraph?"}]})
python
from langchain_core.tools import tool
from deepagents import create_deep_agent
@tool
def web_search(query: str) -> str:
"""在网络上搜索信息。"""
return tavily_client.search(query)
agent = create_deep_agent(
tools=[web_search],
system_prompt="你是一名研究助手,通过网络搜索回答问题。",
)
result = agent.invoke({"messages": [{"role": "user", "content": "什么是LangGraph?"}]})
With Custom Model
自定义模型
python
from langchain.chat_models import init_chat_model
from deepagents import create_deep_agent
python
from langchain.chat_models import init_chat_model
from deepagents import create_deep_agent
OpenAI
OpenAI模型
model = init_chat_model("openai:gpt-4o")
model = init_chat_model("openai:gpt-4o")
Or Anthropic with custom settings
或带自定义配置的Anthropic模型
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model_name="claude-sonnet-4-5-20250929", max_tokens=8192)
agent = create_deep_agent(model=model)
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(model_name="claude-sonnet-4-5-20250929", max_tokens=8192)
agent = create_deep_agent(model=model)
With Checkpointing (Persistence)
检查点(持久化)
python
from langgraph.checkpoint.memory import InMemorySaver
from deepagents import create_deep_agent
agent = create_deep_agent(checkpointer=InMemorySaver())
python
from langgraph.checkpoint.memory import InMemorySaver
from deepagents import create_deep_agent
agent = create_deep_agent(checkpointer=InMemorySaver())
Must provide thread_id with checkpointer
使用检查点时必须提供thread_id
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
Resume conversation
恢复对话
result = agent.invoke({"messages": [{"role": "user", "content": "Follow up"}]}, config)
result = agent.invoke({"messages": [{"role": "user", "content": "跟进问题"}]}, config)
Streaming
流式传输
The agent supports all LangGraph stream modes.
Agent支持所有LangGraph流式传输模式。
Stream Updates
流式更新
python
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "Write a report"}]},
stream_mode="updates"
):
print(chunk) # {"node_name": {"key": "value"}}
python
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "撰写一份报告"}]},
stream_mode="updates"
):
print(chunk) # {"node_name": {"key": "value"}}
Stream Messages (Token-by-Token)
消息流式传输(逐Token)
python
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "Explain quantum computing"}]},
stream_mode="messages"
):
# Real-time token streaming
print(chunk.content, end="", flush=True)
python
for chunk in agent.stream(
{"messages": [{"role": "user", "content": "解释量子计算"}]},
stream_mode="messages"
):
# 实时逐Token输出
print(chunk.content, end="", flush=True)
Async Streaming
异步流式传输
python
async for chunk in agent.astream(
{"messages": [...]},
stream_mode="updates"
):
print(chunk)
python
async for chunk in agent.astream(
{"messages": [...]},
stream_mode="updates"
):
print(chunk)
Multiple Stream Modes
多模式流式传输
python
for mode, chunk in agent.stream(
{"messages": [...]},
stream_mode=["updates", "messages"]
):
if mode == "messages":
print("Token:", chunk.content)
else:
print("Update:", chunk)
python
for mode, chunk in agent.stream(
{"messages": [...]},
stream_mode=["updates", "messages"]
):
if mode == "messages":
print("Token:", chunk.content)
else:
print("Update:", chunk)
Backend Configuration
后端配置
StateBackend (Default - Ephemeral)
StateBackend(默认 - 临时存储)
Files stored in agent state, persist within thread only.
python
文件存储在Agent状态中,仅在当前线程内持久化。
python
Implicit - this is the default
隐式设置 - 默认使用此后端
agent = create_deep_agent()
agent = create_deep_agent()
Explicit
显式设置
from deepagents.backends import StateBackend
agent = create_deep_agent(backend=lambda rt: StateBackend(rt))
from deepagents.backends import StateBackend
agent = create_deep_agent(backend=lambda rt: StateBackend(rt))
FilesystemBackend (Real Disk)
FilesystemBackend(本地磁盘存储)
Read/write actual files on disk. Enables the execute tool for shell commands.
python
from deepagents.backends import FilesystemBackend
agent = create_deep_agent(
backend=FilesystemBackend(root_dir="/path/to/project"),
)
读写本地实际文件,支持execute工具执行Shell命令。
python
from deepagents.backends import FilesystemBackend
agent = create_deep_agent(
backend=FilesystemBackend(root_dir="/path/to/project"),
)
StoreBackend (Persistent Cross-Thread)
StoreBackend(跨线程持久化存储)
Uses LangGraph Store for persistence across conversations.
python
from langgraph.store.memory import InMemoryStore
from deepagents.backends import StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=lambda rt: StoreBackend(rt),
store=store, # Required for StoreBackend
)
使用LangGraph Store实现跨对话持久化。
python
from langgraph.store.memory import InMemoryStore
from deepagents.backends import StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=lambda rt: StoreBackend(rt),
store=store, # StoreBackend必填参数
)
CompositeBackend (Hybrid Routing)
CompositeBackend(混合路由)
Route different paths to different backends.
python
from langgraph.store.memory import InMemoryStore
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=CompositeBackend(
default=StateBackend(), # /workspace/* → ephemeral
routes={
"/memories/": StoreBackend(store=store), # persistent
"/preferences/": StoreBackend(store=store), # persistent
},
),
store=store,
)
将不同路径路由到不同后端。
python
from langgraph.store.memory import InMemoryStore
from deepagents.backends import CompositeBackend, StateBackend, StoreBackend
store = InMemoryStore()
agent = create_deep_agent(
backend=CompositeBackend(
default=StateBackend(), # /workspace/* → 临时存储
routes={
"/memories/": StoreBackend(store=store), # 持久化存储
"/preferences/": StoreBackend(store=store), # 持久化存储
},
),
store=store,
)
Files under /memories/ persist across all conversations
/memories/下的文件在所有对话中持久化
Files under /workspace/ are ephemeral per-thread
/workspace/下的文件仅在当前线程内临时存储
Subagents
子Agent
Using the Default General-Purpose Agent
使用默认通用Agent
By default, a general-purpose subagent is available with all main agent tools.
python
agent = create_deep_agent(tools=[web_search])
默认提供general-purpose子Agent,包含主Agent的所有工具。
python
agent = create_deep_agent(tools=[web_search])
The agent can now delegate via the task tool:
Agent现在可通过task工具委托任务:
task(subagent_type="general-purpose", prompt="Research topic X in depth")
task(subagent_type="general-purpose", prompt="深入研究主题X")
Defining Custom Subagents
定义自定义子Agent
python
from deepagents import create_deep_agent
research_agent = {
"name": "researcher",
"description": "Conducts deep research on complex topics with web search",
"system_prompt": """You are an expert researcher.
Search thoroughly, cross-reference sources, and synthesize findings.""",
"tools": [web_search, document_reader],
}
code_agent = {
"name": "coder",
"description": "Writes, reviews, and debugs code",
"system_prompt": "You are an expert programmer. Write clean, tested code.",
"tools": [code_executor, linter],
"model": "openai:gpt-4o", # Optional: different model per subagent
}
agent = create_deep_agent(
subagents=[research_agent, code_agent],
system_prompt="Delegate research to the researcher and coding to the coder.",
)
python
from deepagents import create_deep_agent
research_agent = {
"name": "researcher",
"description": "通过网络搜索对复杂主题进行深入研究",
"system_prompt": """你是一名专家研究员。
进行全面搜索、交叉验证来源并整合研究结果。""",
"tools": [web_search, document_reader],
}
code_agent = {
"name": "coder",
"description": "编写、评审和调试代码",
"system_prompt": "你是一名专家程序员,编写简洁、经过测试的代码。",
"tools": [code_executor, linter],
"model": "openai:gpt-4o", # 可选:为子Agent指定不同模型
}
agent = create_deep_agent(
subagents=[research_agent, code_agent],
system_prompt="将研究任务委托给研究员,编码任务委托给程序员。",
)
Pre-compiled LangGraph Subagents
预编译LangGraph子Agent
Use existing LangGraph graphs as subagents.
python
from deepagents import CompiledSubAgent, create_deep_agent
from langgraph.prebuilt import create_react_agent
将现有LangGraph图作为子Agent使用。
python
from deepagents import CompiledSubAgent, create_deep_agent
from langgraph.prebuilt import create_react_agent
Existing graph
现有图
custom_graph = create_react_agent(
model="anthropic:claude-sonnet-4-5-20250929",
tools=[specialized_tool],
prompt="Custom workflow instructions",
)
agent = create_deep_agent(
subagents=[CompiledSubAgent(
name="custom-workflow",
description="Runs my specialized analysis workflow",
runnable=custom_graph,
)]
)
custom_graph = create_react_agent(
model="anthropic:claude-sonnet-4-5-20250929",
tools=[specialized_tool],
prompt="自定义工作流说明",
)
agent = create_deep_agent(
subagents=[CompiledSubAgent(
name="custom-workflow",
description="运行我的专业分析工作流",
runnable=custom_graph,
)]
)
Subagent with Custom Middleware
带自定义中间件的子Agent
python
from langchain.agents.middleware import AgentMiddleware
class LoggingMiddleware(AgentMiddleware):
def transform_response(self, response):
print(f"Subagent response: {response}")
return response
agent_spec = {
"name": "logged-agent",
"description": "Agent with extra logging",
"system_prompt": "You are helpful.",
"tools": [],
"middleware": [LoggingMiddleware()], # Added after default middleware
}
python
from langchain.agents.middleware import AgentMiddleware
class LoggingMiddleware(AgentMiddleware):
def transform_response(self, response):
print(f"Subagent response: {response}")
return response
agent_spec = {
"name": "logged-agent",
"description": "带有额外日志功能的Agent",
"system_prompt": "你是一个乐于助人的助手。",
"tools": [],
"middleware": [LoggingMiddleware()], # 添加到默认中间件之后
}
Human-in-the-Loop
人机协同(Human-in-the-Loop)
Basic Interrupt Configuration
基础中断配置
Pause execution before specific tools for human approval.
python
from deepagents import create_deep_agent
agent = create_deep_agent(
tools=[send_email, delete_file, web_search],
interrupt_on={
"send_email": True, # Simple interrupt
"delete_file": True, # Require approval before delete
# web_search not listed - runs without approval
},
checkpointer=checkpointer, # Required for interrupts
)
在执行特定工具前暂停,等待人工批准。
python
from deepagents import create_deep_agent
agent = create_deep_agent(
tools=[send_email, delete_file, web_search],
interrupt_on={
"send_email": True, # 简单中断
"delete_file": True, # 删除前需要批准
# 未列出web_search - 无需批准直接运行
},
checkpointer=checkpointer, # 中断功能必填
)
Interrupt with Options
带选项的中断
python
agent = create_deep_agent(
tools=[send_email],
interrupt_on={
"send_email": {
"allowed_decisions": ["approve", "edit", "reject"]
},
},
checkpointer=checkpointer,
)
python
agent = create_deep_agent(
tools=[send_email],
interrupt_on={
"send_email": {
"allowed_decisions": ["approve", "edit", "reject"]
},
},
checkpointer=checkpointer,
)
Invoke - will pause at send_email
调用Agent - 会在send_email处暂停
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
config = {"configurable": {"thread_id": "user-123"}}
result = agent.invoke({"messages": [...]}, config)
Check state
检查状态
state = agent.get_state(config)
if state.next: # Has pending interrupt
# Resume with approval
from langgraph.types import Command
agent.invoke(Command(resume={"approved": True}), config)
# Or resume with edit
agent.invoke(Command(resume={"edited_args": {"to": "new@email.com"}}), config)
# Or reject
agent.invoke(Command(resume={"rejected": True}), config)
state = agent.get_state(config)
if state.next: # 存在待处理中断
# 批准后恢复
from langgraph.types import Command
agent.invoke(Command(resume={"approved": True}), config)
# 编辑后恢复
agent.invoke(Command(resume={"edited_args": {"to": "new@email.com"}}), config)
# 拒绝操作
agent.invoke(Command(resume={"rejected": True}), config)
Interrupt on Subagent Tools
子Agent工具中断
python
python
Interrupts apply to subagents too
中断规则同样适用于子Agent
agent = create_deep_agent(
subagents=[research_agent],
interrupt_on={
"web_search": True, # Interrupt even when subagent calls it
},
checkpointer=checkpointer,
)
agent = create_deep_agent(
subagents=[research_agent],
interrupt_on={
"web_search": True, # 即使子Agent调用也会触发中断
},
checkpointer=checkpointer,
)
Custom Middleware
自定义中间件
Middleware Structure
中间件结构
python
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelRequest,
ModelResponse,
)
from langchain_core.tools import tool
class MyMiddleware(AgentMiddleware):
# Tools to inject
tools = []
# System prompt content to inject
system_prompt = ""
def transform_request(self, request: ModelRequest) -> ModelRequest:
"""Modify request before sending to model."""
return request
def transform_response(self, response: ModelResponse) -> ModelResponse:
"""Modify response after receiving from model."""
return response
python
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelRequest,
ModelResponse,
)
from langchain_core.tools import tool
class MyMiddleware(AgentMiddleware):
# 要注入的工具
tools = []
# 要注入的系统提示内容
system_prompt = ""
def transform_request(self, request: ModelRequest) -> ModelRequest:
"""在发送给模型前修改请求。"""
return request
def transform_response(self, response: ModelResponse) -> ModelResponse:
"""在接收模型响应后修改响应。"""
return response
Injecting Tools via Middleware
通过中间件注入工具
python
from langchain_core.tools import tool
@tool
def get_current_time() -> str:
"""Get the current time."""
from datetime import datetime
return datetime.now().isoformat()
class TimeMiddleware(AgentMiddleware):
tools = [get_current_time]
system_prompt = "You have access to get_current_time for time-sensitive tasks."
agent = create_deep_agent(middleware=[TimeMiddleware()])
python
from langchain_core.tools import tool
@tool
def get_current_time() -> str:
"""获取当前时间。"""
from datetime import datetime
return datetime.now().isoformat()
class TimeMiddleware(AgentMiddleware):
tools = [get_current_time]
system_prompt = "你可以使用get_current_time工具处理时间敏感任务。"
agent = create_deep_agent(middleware=[TimeMiddleware()])
Context Injection Middleware
上下文注入中间件
python
class UserContextMiddleware(AgentMiddleware):
def __init__(self, user_preferences: dict):
self.user_preferences = user_preferences
@property
def system_prompt(self):
return f"User preferences: {self.user_preferences}"
agent = create_deep_agent(
middleware=[UserContextMiddleware({"theme": "dark", "language": "en"})]
)
python
class UserContextMiddleware(AgentMiddleware):
def __init__(self, user_preferences: dict):
self.user_preferences = user_preferences
@property
def system_prompt(self):
return f"用户偏好: {self.user_preferences}"
agent = create_deep_agent(
middleware=[UserContextMiddleware({"theme": "dark", "language": "en"})]
)
Response Logging Middleware
响应日志中间件
python
import logging
class LoggingMiddleware(AgentMiddleware):
def transform_response(self, response: ModelResponse) -> ModelResponse:
logging.info(f"Agent response: {response.messages[-1].content[:100]}...")
return response
agent = create_deep_agent(middleware=[LoggingMiddleware()])
python
import logging
class LoggingMiddleware(AgentMiddleware):
def transform_response(self, response: ModelResponse) -> ModelResponse:
logging.info(f"Agent response: {response.messages[-1].content[:100]}...")
return response
agent = create_deep_agent(middleware=[LoggingMiddleware()])
MCP Tool Integration
MCP工具集成
Connect MCP (Model Context Protocol) servers to provide additional tools.
python
from langchain_mcp_adapters.client import MultiServerMCPClient
from deepagents import create_deep_agent
async def main():
mcp_client = MultiServerMCPClient({
"filesystem": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/path"],
},
"github": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-github"],
"env": {"GITHUB_TOKEN": os.environ["GITHUB_TOKEN"]},
},
})
mcp_tools = await mcp_client.get_tools()
agent = create_deep_agent(tools=mcp_tools)
async for chunk in agent.astream(
{"messages": [{"role": "user", "content": "List my repos"}]}
):
print(chunk)
连接MCP(Model Context Protocol)服务器以扩展工具能力。
python
from langchain_mcp_adapters.client import MultiServerMCPClient
from deepagents import create_deep_agent
async def main():
mcp_client = MultiServerMCPClient({
"filesystem": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/path"],
},
"github": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-github"],
"env": {"GITHUB_TOKEN": os.environ["GITHUB_TOKEN"]},
},
})
mcp_tools = await mcp_client.get_tools()
agent = create_deep_agent(tools=mcp_tools)
async for chunk in agent.astream(
{"messages": [{"role": "user", "content": "列出我的仓库"}]}
):
print(chunk)
Additional References
更多参考
For detailed reference documentation, see:
- Built-in Tools Reference - Complete list of tools available on every agent (filesystem, task management, subagent delegation) with path requirements
- Common Patterns - Production-ready examples including research agents with memory, code assistants with disk access, multi-specialist teams, and production PostgreSQL setup
如需详细参考文档,请查看:
- 内置工具参考 - 所有Agent可用工具的完整列表(文件系统、任务管理、子Agent委托)及路径要求
- 常见模式 - 生产级示例,包括带记忆的研究Agent、磁盘访问的代码助手、多专家团队和生产环境PostgreSQL配置