# Fix Broken AI Features (DSPy Debugging)

Fix broken AI features. Use when your AI is throwing errors, producing wrong outputs, crashing, returning garbage, not responding, or behaving unexpectedly. Covers DSPy debugging, error diagnosis, and troubleshooting.

Install: `npx skill4agent add lebsral/dspy-programming-not-prompting-lms-skills ai-fixing-errors`

import dspy
# Check current config
print(dspy.settings.lm) # Should show your LM, not None
# If None, configure it:
lm = dspy.LM("openai/gpt-4o-mini")
dspy.configure(lm=lm)dspy.configure(lm=lm)provider/model-name# Test the AI provider directly
lm = dspy.LM("openai/gpt-4o-mini")
response = lm("Hello, respond with just 'OK'")
print(response)# Check your signature defines the right fields
class MySignature(dspy.Signature):
"""Clear task description here."""
input_field: str = dspy.InputField(desc="what this contains")
output_field: str = dspy.OutputField(desc="what to produce")
# Verify by inspecting
print(MySignature.fields)dspy.InputField()dspy.OutputField()strlist[str]Literal[...]# Check that input field names match
result = my_program(question="test") # field name must match signature
# Wrong:
result = my_program(q="test") # 'q' doesn't match 'question'
result = my_program("test") # positional args don't workresult = my_program(question="test")
print(result) # see all fields
print(result.answer) # access specific field
print(type(result.answer)) # check typeLiteral# Show the last 3 AI calls
dspy.inspect_history(n=3)AttributeError: 'NoneType' has no attribute ...dspy.configure(lm=lm)ValueError: Could not parse outputdspy.inspect_history()dspy.ChainOfThoughtdspy.PredictTypeError: forward() got an unexpected keyword argumentInputField# Check retriever config
print(dspy.settings.rm)
# Test retriever directly
rm = dspy.ColBERTv2(url="http://...")
results = rm("test query", k=3)
print(results)max_bootstrapped_demosdspy.Assertdspy.Suggestdspy.configure(lm=lm, trace=[])
# Now run your program — trace will be populated
result = my_program(question="test")# Print the module tree
print(my_program)
# See all named predictors
for name, predictor in my_program.named_predictors():
print(f"{name}: {predictor}")class MyPipeline(dspy.Module):
def __init__(self):
self.step1 = dspy.ChainOfThought("question -> search_query")
self.step2 = dspy.Retrieve(k=3)
self.step3 = dspy.ChainOfThought("context, question -> answer")
def forward(self, question):
query = self.step1(question=question)
print(f"Step 1 output: {query.search_query}") # Debug
context = self.step2(query.search_query)
print(f"Step 2 retrieved: {len(context.passages)} passages") # Debug
answer = self.step3(context=context.passages, question=question)
print(f"Step 3 output: {answer.answer}") # Debug
return answer# Before optimization
baseline = MyProgram()
baseline(question="test")
print("=== BASELINE PROMPT ===")
dspy.inspect_history(n=1)
# After optimization
optimized = MyProgram()
optimized.load("optimized.json")
optimized(question="test")
print("=== OPTIMIZED PROMPT ===")
dspy.inspect_history(n=1)/ai-improving-accuracy/ai-tracing-requestsdocs/dspy-reference.md