Note: this page shows each code snippet twice — the original and a side-by-side translated copy.
import re

import dspy
class CitedAnswer(dspy.Signature):
    """Answer the question using the provided sources. Cite every claim with [1], [2], etc."""
    # NOTE: in dspy, this docstring is the task instruction sent to the LM —
    # do not edit it casually; changing it changes model behavior.
    context: list[str] = dspy.InputField(desc="Numbered source documents")
    question: str = dspy.InputField()
    answer: str = dspy.OutputField(desc="Answer with inline citations like [1], [2]")
class CitationEnforcer(dspy.Module):
    """Generate a cited answer and enforce citation quality with ``dspy.Assert``.

    Backtracks (re-prompts with the assertion message as feedback) when fewer
    than half of the answer's sentences carry a ``[n]`` citation, or when a
    cited index has no matching source document.
    """

    #: Minimum fraction of sentences that must contain a [n] citation.
    MIN_CITATION_RATIO = 0.5

    def __init__(self):
        # Required: dspy.Module.__init__ registers sub-modules for the
        # compiler/optimizers; skipping it breaks parameter discovery.
        super().__init__()
        self.answer = dspy.ChainOfThought(CitedAnswer)

    def forward(self, context, question):
        result = self.answer(context=context, question=question)

        # Naive sentence split on "." — adequate for a ratio check, though
        # abbreviations ("etc.") inflate the sentence count slightly.
        sentences = [s.strip() for s in result.answer.split(".") if s.strip()]
        cited = sum(1 for s in sentences if re.search(r"\[\d+\]", s))

        # max(..., 1) guards against ZeroDivisionError on an empty answer.
        citation_ratio = cited / max(len(sentences), 1)
        dspy.Assert(
            citation_ratio >= self.MIN_CITATION_RATIO,
            "Answer must cite sources. Use [1], [2], etc. after claims. "
            f"Only {citation_ratio:.0%} of sentences have citations."
        )

        # Every cited number must map to one of the 1-indexed sources.
        cited_nums = {int(n) for n in re.findall(r"\[(\d+)\]", result.answer)}
        valid_nums = set(range(1, len(context) + 1))
        invalid = cited_nums - valid_nums
        dspy.Assert(
            len(invalid) == 0,
            f"Citations {invalid} don't match any source. Valid sources: [1] to [{len(context)}]."
        )
        return result
import re
class CitedAnswer(dspy.Signature):
    """Answer the question using the provided sources. Cite every claim with [1], [2], etc."""
    # NOTE: in dspy, this docstring is the task instruction sent to the LM —
    # do not edit it casually; changing it changes model behavior.
    context: list[str] = dspy.InputField(desc="Numbered source documents")
    question: str = dspy.InputField()
    answer: str = dspy.OutputField(desc="Answer with inline citations like [1], [2]")
class CitationEnforcer(dspy.Module):
    """Generate a cited answer and enforce citation quality with ``dspy.Assert``.

    Backtracks (re-prompts with the assertion message as feedback) when fewer
    than half of the answer's sentences carry a ``[n]`` citation, or when a
    cited index has no matching source document.
    """

    #: Minimum fraction of sentences that must contain a [n] citation.
    MIN_CITATION_RATIO = 0.5

    def __init__(self):
        # Required: dspy.Module.__init__ registers sub-modules for the
        # compiler/optimizers; skipping it breaks parameter discovery.
        super().__init__()
        self.answer = dspy.ChainOfThought(CitedAnswer)

    def forward(self, context, question):
        result = self.answer(context=context, question=question)

        # Naive sentence split on "." — adequate for a ratio check, though
        # abbreviations ("etc.") inflate the sentence count slightly.
        sentences = [s.strip() for s in result.answer.split(".") if s.strip()]
        cited = sum(1 for s in sentences if re.search(r"\[\d+\]", s))

        # max(..., 1) guards against ZeroDivisionError on an empty answer.
        citation_ratio = cited / max(len(sentences), 1)
        dspy.Assert(
            citation_ratio >= self.MIN_CITATION_RATIO,
            "Answer must cite sources. Use [1], [2], etc. after claims. "
            f"Only {citation_ratio:.0%} of sentences have citations."
        )

        # Every cited number must map to one of the 1-indexed sources.
        cited_nums = {int(n) for n in re.findall(r"\[(\d+)\]", result.answer)}
        valid_nums = set(range(1, len(context) + 1))
        invalid = cited_nums - valid_nums
        dspy.Assert(
            len(invalid) == 0,
            f"Citations {invalid} don't match any source. Valid sources: [1] to [{len(context)}]."
        )
        return result
class CheckFaithfulness(dspy.Signature):
    """Check if every claim in the answer is supported by the context."""
    # NOTE: this docstring is the instruction the verifier LM receives.
    context: list[str] = dspy.InputField(desc="Source documents")
    answer: str = dspy.InputField(desc="Generated answer to verify")
    is_faithful: bool = dspy.OutputField(desc="Is every claim supported by the context?")
    unsupported_claims: list[str] = dspy.OutputField(desc="Claims not found in context")
class FaithfulResponder(dspy.Module):
    """Retrieve, answer with citations, then verify faithfulness.

    ``dspy.Assert`` triggers a backtracked retry — with the unsupported
    claims fed back to the generator — whenever the verifier flags the
    answer as unfaithful.
    """

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.retrieve = dspy.Retrieve(k=5)  # top-5 passages for grounding
        self.answer = dspy.ChainOfThought(CitedAnswer)
        self.verify = dspy.Predict(CheckFaithfulness)

    def forward(self, question):
        context = self.retrieve(question).passages
        result = self.answer(context=context, question=question)
        # Second LM call: judge the answer against the retrieved sources.
        check = self.verify(context=context, answer=result.answer)
        dspy.Assert(
            check.is_faithful,
            f"Answer contains unsupported claims: {check.unsupported_claims}. "
            "Rewrite using only information from the provided sources."
        )
        return result
class CheckFaithfulness(dspy.Signature):
    """Check if every claim in the answer is supported by the context."""
    # NOTE: this docstring is the instruction the verifier LM receives.
    context: list[str] = dspy.InputField(desc="Source documents")
    answer: str = dspy.InputField(desc="Generated answer to verify")
    is_faithful: bool = dspy.OutputField(desc="Is every claim supported by the context?")
    unsupported_claims: list[str] = dspy.OutputField(desc="Claims not found in context")
class FaithfulResponder(dspy.Module):
    """Retrieve, answer with citations, then verify faithfulness.

    ``dspy.Assert`` triggers a backtracked retry — with the unsupported
    claims fed back to the generator — whenever the verifier flags the
    answer as unfaithful.
    """

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.retrieve = dspy.Retrieve(k=5)  # top-5 passages for grounding
        self.answer = dspy.ChainOfThought(CitedAnswer)
        self.verify = dspy.Predict(CheckFaithfulness)

    def forward(self, question):
        context = self.retrieve(question).passages
        result = self.answer(context=context, question=question)
        # Second LM call: judge the answer against the retrieved sources.
        check = self.verify(context=context, answer=result.answer)
        dspy.Assert(
            check.is_faithful,
            f"Answer contains unsupported claims: {check.unsupported_claims}. "
            "Rewrite using only information from the provided sources."
        )
        return result
class SelfCheckedAnswer(dspy.Module):
    """Answer, self-verify, and report the verification outcome.

    Uses ``dspy.Suggest`` (a soft constraint) rather than ``dspy.Assert``,
    so an unfaithful answer is still returned — flagged, not rejected.
    """

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.answer = dspy.ChainOfThought("context, question -> answer")
        self.check = dspy.ChainOfThought(CheckFaithfulness)

    def forward(self, context, question):
        result = self.answer(context=context, question=question)
        verification = self.check(context=context, answer=result.answer)
        # Soft constraint: nudges a retry but never hard-fails the pipeline.
        dspy.Suggest(
            verification.is_faithful,
            f"Some claims may not be supported: {verification.unsupported_claims}. "
            "Consider revising to stick closer to the sources."
        )
        return dspy.Prediction(
            answer=result.answer,
            is_verified=verification.is_faithful,
            unsupported=verification.unsupported_claims,
        )
class SelfCheckedAnswer(dspy.Module):
    """Answer, self-verify, and report the verification outcome.

    Uses ``dspy.Suggest`` (a soft constraint) rather than ``dspy.Assert``,
    so an unfaithful answer is still returned — flagged, not rejected.
    """

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.answer = dspy.ChainOfThought("context, question -> answer")
        self.check = dspy.ChainOfThought(CheckFaithfulness)

    def forward(self, context, question):
        result = self.answer(context=context, question=question)
        verification = self.check(context=context, answer=result.answer)
        # Soft constraint: nudges a retry but never hard-fails the pipeline.
        dspy.Suggest(
            verification.is_faithful,
            f"Some claims may not be supported: {verification.unsupported_claims}. "
            "Consider revising to stick closer to the sources."
        )
        return dspy.Prediction(
            answer=result.answer,
            is_verified=verification.is_faithful,
            unsupported=verification.unsupported_claims,
        )
class CrossChecked(dspy.Module):
    """Generate two independent answers and assert that they agree.

    Disagreement between the two samples is treated as a hallucination
    signal and triggers a ``dspy.Assert`` backtrack. Costs two generation
    calls plus one comparison call.
    """

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.gen_a = dspy.ChainOfThought("context, question -> answer")
        self.gen_b = dspy.ChainOfThought("context, question -> answer")
        self.compare = dspy.Predict(CompareAnswers)

    def forward(self, context, question):
        a = self.gen_a(context=context, question=question)
        b = self.gen_b(context=context, question=question)
        check = self.compare(answer_a=a.answer, answer_b=b.answer)
        dspy.Assert(
            check.agree,
            f"Two independent answers disagree: {check.discrepancy}. "
            "This suggests hallucination. Regenerate with closer attention to sources."
        )
        # Once both agree, either answer will do; return the first.
        return a
class CompareAnswers(dspy.Signature):
    """Check if two independently generated answers agree on the facts."""
    # NOTE: this docstring is the instruction the comparison LM receives.
    answer_a: str = dspy.InputField()
    answer_b: str = dspy.InputField()
    agree: bool = dspy.OutputField(desc="Do they agree on all factual claims?")
    discrepancy: str = dspy.OutputField(desc="What they disagree on, if anything")
class CrossChecked(dspy.Module):
    """Generate two independent answers and assert that they agree.

    Disagreement between the two samples is treated as a hallucination
    signal and triggers a ``dspy.Assert`` backtrack. Costs two generation
    calls plus one comparison call.
    """

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.gen_a = dspy.ChainOfThought("context, question -> answer")
        self.gen_b = dspy.ChainOfThought("context, question -> answer")
        self.compare = dspy.Predict(CompareAnswers)

    def forward(self, context, question):
        a = self.gen_a(context=context, question=question)
        b = self.gen_b(context=context, question=question)
        check = self.compare(answer_a=a.answer, answer_b=b.answer)
        dspy.Assert(
            check.agree,
            f"Two independent answers disagree: {check.discrepancy}. "
            "This suggests hallucination. Regenerate with closer attention to sources."
        )
        # Once both agree, either answer will do; return the first.
        return a
class CompareAnswers(dspy.Signature):
    """Check if two independently generated answers agree on the facts."""
    # NOTE: this docstring is the instruction the comparison LM receives.
    answer_a: str = dspy.InputField()
    answer_b: str = dspy.InputField()
    agree: bool = dspy.OutputField(desc="Do they agree on all factual claims?")
    discrepancy: str = dspy.OutputField(desc="What they disagree on, if anything")
class GroundedQA(dspy.Module):
    """Full grounding pipeline: retrieve, answer with citations, verify."""

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.retrieve = dspy.Retrieve(k=5)  # top-5 passages for grounding
        self.answer = dspy.ChainOfThought(CitedAnswer)
        self.verify = dspy.Predict(CheckFaithfulness)

    def forward(self, question):
        # Ground the answer in retrieved sources.
        context = self.retrieve(question).passages
        # Generate with the citation-requiring signature.
        result = self.answer(context=context, question=question)
        # Verify faithfulness with a second LM call.
        check = self.verify(context=context, answer=result.answer)
        dspy.Assert(
            check.is_faithful,
            f"Unsupported claims: {check.unsupported_claims}. "
            "Only use information from the provided sources."
        )
        return result
class GroundedQA(dspy.Module):
    """Full grounding pipeline: retrieve, answer with citations, verify."""

    def __init__(self):
        # Required so dspy.Module can register the sub-modules below.
        super().__init__()
        self.retrieve = dspy.Retrieve(k=5)  # top-5 passages for grounding
        self.answer = dspy.ChainOfThought(CitedAnswer)
        self.verify = dspy.Predict(CheckFaithfulness)

    def forward(self, question):
        # Ground the answer in retrieved sources.
        context = self.retrieve(question).passages
        # Generate with the citation-requiring signature.
        result = self.answer(context=context, question=question)
        # Verify faithfulness with a second LM call.
        check = self.verify(context=context, answer=result.answer)
        dspy.Assert(
            check.is_faithful,
            f"Unsupported claims: {check.unsupported_claims}. "
            "Only use information from the provided sources."
        )
        return result
class ConfidenceGated(dspy.Signature):
    """Answer the question and rate your confidence."""
    # NOTE: this docstring is the instruction the LM receives.
    context: list[str] = dspy.InputField()
    question: str = dspy.InputField()
    answer: str = dspy.OutputField()
    confidence: float = dspy.OutputField(desc="0.0 to 1.0, how confident are you?")
    reasoning: str = dspy.OutputField(desc="Why this confidence level?")
class GatedResponder(dspy.Module):
    """Route low-confidence answers to human review instead of rejecting them.

    NOTE(review): self-reported LM confidence is typically poorly calibrated —
    treat ``threshold`` as a routing heuristic, not a probability.
    """

    def __init__(self, threshold=0.7):
        # Required so dspy.Module can register the sub-module below.
        super().__init__()
        self.respond = dspy.ChainOfThought(ConfidenceGated)
        self.threshold = threshold  # answers below this are flagged for review

    def forward(self, context, question):
        result = self.respond(context=context, question=question)
        if result.confidence < self.threshold:
            # Low confidence: surface the model's own reasoning for a reviewer.
            return dspy.Prediction(
                answer=result.answer,
                needs_review=True,
                confidence=result.confidence,
                reason=result.reasoning,
            )
        return dspy.Prediction(
            answer=result.answer,
            needs_review=False,
            confidence=result.confidence,
        )
class ConfidenceGated(dspy.Signature):
    """Answer the question and rate your confidence."""
    # NOTE: this docstring is the instruction the LM receives.
    context: list[str] = dspy.InputField()
    question: str = dspy.InputField()
    answer: str = dspy.OutputField()
    confidence: float = dspy.OutputField(desc="0.0 to 1.0, how confident are you?")
    reasoning: str = dspy.OutputField(desc="Why this confidence level?")
class GatedResponder(dspy.Module):
    """Route low-confidence answers to human review instead of rejecting them.

    NOTE(review): self-reported LM confidence is typically poorly calibrated —
    treat ``threshold`` as a routing heuristic, not a probability.
    """

    def __init__(self, threshold=0.7):
        # Required so dspy.Module can register the sub-module below.
        super().__init__()
        self.respond = dspy.ChainOfThought(ConfidenceGated)
        self.threshold = threshold  # answers below this are flagged for review

    def forward(self, context, question):
        result = self.respond(context=context, question=question)
        if result.confidence < self.threshold:
            # Low confidence: surface the model's own reasoning for a reviewer.
            return dspy.Prediction(
                answer=result.answer,
                needs_review=True,
                confidence=result.confidence,
                reason=result.reasoning,
            )
        return dspy.Prediction(
            answer=result.answer,
            needs_review=False,
            confidence=result.confidence,
        )
| Pattern | Cost | Latency | Best for |
|---|---|---|---|
| Citation enforcement | 1 LM call | Low | When you have numbered sources |
| Faithfulness verification | 2 LM calls | Medium | RAG systems, doc Q&A |
| Self-check | 2 LM calls | Medium | General fact-checking |
| Cross-check | 3 LM calls | High | High-stakes, critical outputs |
| Confidence gating | 1 LM call | Low | Human-in-the-loop systems |
| Retrieval grounding | 1 retrieval + 1-2 LM | Medium | When you have a knowledge base |
| 模式 | 成本 | 延迟 | 适用场景 |
|---|---|---|---|
| 引用强制 | 1次LM调用 | 低 | 拥有编号原始材料时 |
| 忠实性验证 | 2次LM调用 | 中 | RAG系统、文档问答 |
| 自我检查 | 2次LM调用 | 中 | 通用事实核查 |
| 交叉检查 | 3次LM调用 | 高 | 高风险、关键输出 |
| 置信度门控 | 1次LM调用 | 低 | 有人工参与的系统 |
| 检索锚定 | 1次检索 + 1-2次LM调用 | 中 | 拥有知识库时 |
See also: /ai-searching-docs, /ai-checking-outputs, /ai-following-rules, examples.md