Loading...
Loading...
Compare original and translation side by side
import dspy
from typing import Literal


class SupportPipeline(dspy.Module):
    """Three-stage support pipeline: classify -> retrieve -> draft."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.classify = dspy.ChainOfThought(ClassifyTicket)
        self.retrieve = dspy.Retrieve(k=3)
        self.draft = dspy.ChainOfThought(DraftResponse)

    def forward(self, ticket):
        # Stage 1: Classify the ticket so retrieval can be category-aware.
        classification = self.classify(ticket=ticket)
        # Stage 2: Retrieve relevant docs, seeding the query with the category.
        docs = self.retrieve(classification.category + " " + ticket).passages
        # Stage 3: Draft response using classification + docs.
        return self.draft(
            ticket=ticket,
            category=classification.category,
            context=docs,
        )
from typing import Literal

# Closed set of ticket categories; also drives the Literal constraint below.
CATEGORIES = ["billing", "technical", "account", "general"]


class ClassifyTicket(dspy.Signature):
    """Classify the support ticket."""

    ticket: str = dspy.InputField()
    # Literal[tuple(...)] expands at runtime into Literal["billing", ...],
    # constraining the model's output to one of CATEGORIES.
    category: Literal[tuple(CATEGORIES)] = dspy.OutputField()


class DraftResponse(dspy.Signature):
    """Draft a helpful response to the support ticket."""

    ticket: str = dspy.InputField()
    category: str = dspy.InputField()
    context: list[str] = dspy.InputField(desc="Relevant help articles")
    response: str = dspy.OutputField(desc="Professional support response")
import dspy
from typing import Literal


class SupportPipeline(dspy.Module):
    """Three-stage support pipeline: classify -> retrieve -> draft."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.classify = dspy.ChainOfThought(ClassifyTicket)
        self.retrieve = dspy.Retrieve(k=3)
        self.draft = dspy.ChainOfThought(DraftResponse)

    def forward(self, ticket):
        # Stage 1: Classify the ticket so retrieval can be category-aware.
        classification = self.classify(ticket=ticket)
        # Stage 2: Retrieve relevant docs, seeding the query with the category.
        docs = self.retrieve(classification.category + " " + ticket).passages
        # Stage 3: Draft response using classification + docs.
        return self.draft(
            ticket=ticket,
            category=classification.category,
            context=docs,
        )
from typing import Literal

# Closed set of ticket categories; also drives the Literal constraint below.
CATEGORIES = ["billing", "technical", "account", "general"]


class ClassifyTicket(dspy.Signature):
    """Classify the support ticket."""

    ticket: str = dspy.InputField()
    # Literal[tuple(...)] expands at runtime into Literal["billing", ...],
    # constraining the model's output to one of CATEGORIES.
    category: Literal[tuple(CATEGORIES)] = dspy.OutputField()


class DraftResponse(dspy.Signature):
    """Draft a helpful response to the support ticket."""

    ticket: str = dspy.InputField()
    category: str = dspy.InputField()
    context: list[str] = dspy.InputField(desc="Relevant help articles")
    response: str = dspy.OutputField(desc="Professional support response")
class RoutedPipeline(dspy.Module):
    """Route each question to a handler module chosen by a classifier."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.classify = dspy.ChainOfThought(ClassifyInput)
        # NOTE(review): sub-modules held in a plain dict may not be discovered
        # by DSPy optimizers the way attribute-held modules are — confirm.
        self.handlers = {
            "simple": dspy.Predict(SimpleAnswer),
            "complex": dspy.ChainOfThought(DetailedAnswer),
            "research": dspy.ChainOfThought(ResearchAnswer),
        }

    def forward(self, question):
        category = self.classify(question=question).category
        # Fall back to the "simple" handler on an unrecognized category.
        handler = self.handlers.get(category, self.handlers["simple"])
        return handler(question=question)
class RoutedPipeline(dspy.Module):
    """Route each question to a handler module chosen by a classifier."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.classify = dspy.ChainOfThought(ClassifyInput)
        # NOTE(review): sub-modules held in a plain dict may not be discovered
        # by DSPy optimizers the way attribute-held modules are — confirm.
        self.handlers = {
            "simple": dspy.Predict(SimpleAnswer),
            "complex": dspy.ChainOfThought(DetailedAnswer),
            "research": dspy.ChainOfThought(ResearchAnswer),
        }

    def forward(self, question):
        category = self.classify(question=question).category
        # Fall back to the "simple" handler on an unrecognized category.
        handler = self.handlers.get(category, self.handlers["simple"])
        return handler(question=question)
class GenerateAndRefine(dspy.Module):
    """Generate a draft, verify its quality, and refine once if the check fails."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.generate = dspy.ChainOfThought(GenerateDraft)
        self.verify = dspy.ChainOfThought(CheckQuality)
        self.refine = dspy.ChainOfThought(ImproveDraft)

    def forward(self, task):
        # Stage 1: Generate
        draft = self.generate(task=task)
        # Stage 2: Verify
        check = self.verify(task=task, draft=draft.output)
        # Stage 3: Refine if needed
        # NOTE(review): assumes CheckQuality declares is_good as a bool output;
        # a string "False" would be truthy here — confirm the signature.
        if not check.is_good:
            refined = self.refine(
                task=task,
                draft=draft.output,
                feedback=check.feedback,
            )
            return refined
        return draft
class GenerateAndRefine(dspy.Module):
    """Generate a draft, verify its quality, and refine once if the check fails."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.generate = dspy.ChainOfThought(GenerateDraft)
        self.verify = dspy.ChainOfThought(CheckQuality)
        self.refine = dspy.ChainOfThought(ImproveDraft)

    def forward(self, task):
        # Stage 1: Generate
        draft = self.generate(task=task)
        # Stage 2: Verify
        check = self.verify(task=task, draft=draft.output)
        # Stage 3: Refine if needed
        # NOTE(review): assumes CheckQuality declares is_good as a bool output;
        # a string "False" would be truthy here — confirm the signature.
        if not check.is_good:
            refined = self.refine(
                task=task,
                draft=draft.output,
                feedback=check.feedback,
            )
            return refined
        return draft
class EnsemblePipeline(dspy.Module):
    """Generate several candidate answers, then have a judge pick the best."""

    def __init__(self, num_candidates=5):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        # NOTE(review): identical generators produce identical candidates at
        # temperature 0 — confirm sampling settings make the ensemble diverse.
        self.generators = [dspy.ChainOfThought(GenerateAnswer) for _ in range(num_candidates)]
        self.judge = dspy.ChainOfThought(PickBestAnswer)

    def forward(self, question):
        # Stage 1: Generate multiple candidates.
        candidates = [gen(question=question).answer for gen in self.generators]
        # Stage 2: Pick the best.
        return self.judge(
            question=question,
            candidates=candidates,
        )


class PickBestAnswer(dspy.Signature):
    """Pick the best answer from the candidates."""

    question: str = dspy.InputField()
    candidates: list[str] = dspy.InputField(desc="Multiple answer candidates")
    best_answer: str = dspy.OutputField(desc="The most accurate and complete answer")
    reasoning: str = dspy.OutputField(desc="Why this answer was chosen")
class EnsemblePipeline(dspy.Module):
    """Generate several candidate answers, then have a judge pick the best."""

    def __init__(self, num_candidates=5):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        # NOTE(review): identical generators produce identical candidates at
        # temperature 0 — confirm sampling settings make the ensemble diverse.
        self.generators = [dspy.ChainOfThought(GenerateAnswer) for _ in range(num_candidates)]
        self.judge = dspy.ChainOfThought(PickBestAnswer)

    def forward(self, question):
        # Stage 1: Generate multiple candidates.
        candidates = [gen(question=question).answer for gen in self.generators]
        # Stage 2: Pick the best.
        return self.judge(
            question=question,
            candidates=candidates,
        )


class PickBestAnswer(dspy.Signature):
    """Pick the best answer from the candidates."""

    question: str = dspy.InputField()
    candidates: list[str] = dspy.InputField(desc="Multiple answer candidates")
    best_answer: str = dspy.OutputField(desc="The most accurate and complete answer")
    reasoning: str = dspy.OutputField(desc="Why this answer was chosen")
class ParallelAnalysis(dspy.Module):
    """Fan out three independent analyses over the same text, then merge them."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.sentiment = dspy.ChainOfThought(AnalyzeSentiment)
        self.topics = dspy.ChainOfThought(ExtractTopics)
        self.entities = dspy.ChainOfThought(ExtractEntities)
        self.summarize = dspy.ChainOfThought(CombineAnalysis)

    def forward(self, text):
        # Fan out — run in parallel (DSPy can parallelize these)
        sent = self.sentiment(text=text)
        topics = self.topics(text=text)
        entities = self.entities(text=text)
        # Merge results
        return self.summarize(
            text=text,
            sentiment=sent.sentiment,
            topics=topics.topics,
            entities=entities.entities,
        )
class ParallelAnalysis(dspy.Module):
    """Fan out three independent analyses over the same text, then merge them."""

    def __init__(self):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.sentiment = dspy.ChainOfThought(AnalyzeSentiment)
        self.topics = dspy.ChainOfThought(ExtractTopics)
        self.entities = dspy.ChainOfThought(ExtractEntities)
        self.summarize = dspy.ChainOfThought(CombineAnalysis)

    def forward(self, text):
        # Fan out — run in parallel (DSPy can parallelize these)
        sent = self.sentiment(text=text)
        topics = self.topics(text=text)
        entities = self.entities(text=text)
        # Merge results
        return self.summarize(
            text=text,
            sentiment=sent.sentiment,
            topics=topics.topics,
            entities=entities.entities,
        )
class IterativeRefiner(dspy.Module):
    """Generate a draft, then improve it until it scores well or the budget runs out."""

    def __init__(self, max_iterations=3):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.generate = dspy.ChainOfThought(GenerateDraft)
        self.evaluate = dspy.ChainOfThought(EvaluateDraft)
        self.improve = dspy.ChainOfThought(ImproveDraft)
        self.max_iterations = max_iterations

    def forward(self, task):
        draft = self.generate(task=task)
        for _ in range(self.max_iterations):
            evaluation = self.evaluate(task=task, draft=draft.output)
            # NOTE(review): assumes EvaluateDraft declares score as a float
            # output field; a string score would make this comparison raise.
            if evaluation.score >= 0.9:
                break  # good enough — stop refining early
            draft = self.improve(
                task=task,
                draft=draft.output,
                feedback=evaluation.feedback,
            )
        return draft
class IterativeRefiner(dspy.Module):
    """Generate a draft, then improve it until it scores well or the budget runs out."""

    def __init__(self, max_iterations=3):
        # Required so DSPy registers the sub-modules for tracing/optimization.
        super().__init__()
        self.generate = dspy.ChainOfThought(GenerateDraft)
        self.evaluate = dspy.ChainOfThought(EvaluateDraft)
        self.improve = dspy.ChainOfThought(ImproveDraft)
        self.max_iterations = max_iterations

    def forward(self, task):
        draft = self.generate(task=task)
        for _ in range(self.max_iterations):
            evaluation = self.evaluate(task=task, draft=draft.output)
            # NOTE(review): assumes EvaluateDraft declares score as a float
            # output field; a string score would make this comparison raise.
            if evaluation.score >= 0.9:
                break  # good enough — stop refining early
            draft = self.improve(
                task=task,
                draft=draft.output,
                feedback=evaluation.feedback,
            )
        return draft
# Two LMs at different price points; presumably assigned to different pipeline
# stages (see the cost-optimization note below) — confirm against the full doc.
expensive_lm = dspy.LM("openai/gpt-4o")
cheap_lm = dspy.LM("openai/gpt-4o-mini")
pipeline = SupportPipeline()
See `/ai-cutting-costs` for more cost optimization strategies.
查看 `/ai-cutting-costs` 获取更多成本优化策略。
def pipeline_metric(example, prediction, trace=None):
    """Score the final output quality by exact match against the gold response.

    Comparison is case-insensitive and ignores surrounding whitespace.
    Returns a bool.
    """
    gold = example.response.lower().strip()
    predicted = prediction.response.lower().strip()
    return predicted == gold
| If your pipeline... | Use |
|---|---|
| Steps run in a fixed order | DSPy pipeline (this skill) |
| Steps branch based on results | DSPy pipeline with conditional logic in `forward()` |
| Needs cycles (retry loops, agent loops) | LangGraph |
| Needs persistent state across calls | LangGraph with checkpointing |
| Needs human approval mid-pipeline | LangGraph |
| Coordinates multiple independent agents | LangGraph supervisor pattern |
| 如果你的流水线... | 使用 |
|---|---|
| 步骤按固定顺序执行 | DSPy流水线(本技能) |
| 步骤根据结果分支 | 在`forward()`中加入条件逻辑的DSPy流水线 |
| 需要循环(重试循环、代理循环) | LangGraph |
| 需要跨调用的持久化状态 | 带检查点的LangGraph |
| 流水线中间需要人工审批 | 带人工介入(interrupt)的LangGraph |
| 协调多个独立代理 | LangGraph 监督模式 |
from typing import TypedDict

import dspy
from langgraph.graph import StateGraph, START, END


class PipelineState(TypedDict):
    """State dictionary shared between LangGraph nodes."""

    input_text: str
    category: str
    output: str
from typing import TypedDict

import dspy
from langgraph.graph import StateGraph, START, END


class PipelineState(TypedDict):
    """State dictionary shared between LangGraph nodes."""

    input_text: str
    category: str
    output: str
This gives you LangGraph's state management and routing with DSPy's optimizable prompts. For more, see `/ai-building-chatbots` (stateful conversations) and `/ai-coordinating-agents` (multi-agent systems). For the full LangGraph API reference, see [`docs/langchain-langgraph-reference.md`](../../docs/langchain-langgraph-reference.md).
这让你既能利用LangGraph的状态管理和路由功能,又能使用DSPy的可优化提示词。更多内容请查看`/ai-building-chatbots`(有状态对话)和`/ai-coordinating-agents`(多代理系统)。完整的LangGraph API参考请查看[`docs/langchain-langgraph-reference.md`](../../docs/langchain-langgraph-reference.md)。

相关技能:`/ai-checking-outputs`、`/ai-cutting-costs`、`/ai-decomposing-tasks`、`/ai-writing-content`、`/ai-reasoning`、`/ai-improving-accuracy`