# CrewAI Agent Design
CrewAI agent design and configuration. Use when creating, configuring, or debugging crewAI agents — choosing role/goal/backstory, selecting LLMs, assigning tools, tuning max_iter/max_rpm/max_execution_time, enabling planning/code execution/delegation, setting up knowledge sources, using guardrails, or configuring agents in YAML vs code.
npx skill4agent add crewaiinc/skills design-agent

(For task design, see the companion skill: design-task.)

| Bad | Good |
|---|---|
| |
| |
| |
| Bad | Good |
|---|---|
| |
| |
| |
backstory: >
You're a seasoned researcher with 15 years of experience in AI/ML.
You're known for your ability to find obscure but relevant papers
and synthesize complex findings into clear, actionable insights.
You always cite your sources and flag uncertainty explicitly.

Agent(
role="...", # Required: agent's expertise area
goal="...", # Required: what the agent aims to achieve
backstory="...", # Required: context and personality
llm="openai/gpt-4o", # Optional: defaults to OPENAI_MODEL_NAME env var or "gpt-4"
tools=[...], # Optional: list of tool instances
)Agent(
...,
max_iter=25, # Max reasoning iterations per task (default: 25)
max_execution_time=300, # Timeout in seconds (default: None — no limit)
max_rpm=10, # Rate limit: max API calls per minute (default: None)
max_retry_limit=2, # Retries on error (default: 2)
verbose=True, # Show detailed execution logs (default: False)
)max_iterfrom crewai_tools import SerperDevTool, ScrapeWebsiteTool, FileReadTool
Agent(
...,
tools=[SerperDevTool(), ScrapeWebsiteTool()], # Agent-level tools
)design-taskAgent(
...,
llm="openai/gpt-4o", # Main reasoning model
function_calling_llm="openai/gpt-4o-mini", # Cheaper model for tool calls only
)function_calling_llmllmAgent(
...,
allow_delegation=False, # Default: False — agent works alone
)allow_delegation=Truefrom crewai.agents.agent_builder.base_agent import PlanningConfig
Agent(
...,
planning=True, # Enable plan-then-execute (default: False)
planning_config=PlanningConfig(
max_attempts=3, # Max planning iterations
),
)Agent(
...,
allow_code_execution=True, # Enable code execution (default: False)
code_execution_mode="safe", # "safe" (Docker) or "unsafe" (direct) — default: "safe"
)"safe""unsafe"Agent(
...,
respect_context_window=True, # Auto-summarize to stay within limits (default: True)
)TrueFalseAgent(
...,
inject_date=True, # Add current date to task context (default: False)
date_format="%Y-%m-%d", # Date format (default: "%Y-%m-%d")
)

def validate_no_pii(result) -> tuple[bool, Any]:
    """Guardrail: reject agent output that contains PII.

    Returns (True, result) to accept the output, or
    (False, <error message>) to trigger a retry (the agent retries up to
    guardrail_max_retries times, see the Agent config below).

    NOTE(review): contains_pii() is defined elsewhere — not shown here.
    """
    if contains_pii(result.raw):
        # The error string is fed back to the agent as retry guidance.
        return (False, "Output contains PII. Remove all personal information and try again.")
    return (True, result)
Agent(
...,
guardrail=validate_no_pii,
guardrail_max_retries=3, # default: 3
)guardrail_max_retriesfrom crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
Agent(
...,
knowledge_sources=[
TextFileKnowledgeSource(file_paths=["company_handbook.txt"]),
],
embedder={
"provider": "openai",
"config": {"model": "text-embedding-3-small"},
},
)agents.yamlresearcher:
role: >
{topic} Senior Data Researcher
goal: >
Uncover cutting-edge developments in {topic}
with supporting evidence and source citations
backstory: >
You're a seasoned researcher with 15 years of experience.
Known for finding obscure but relevant sources and
synthesizing complex findings into clear insights.
You always cite your sources and flag uncertainty.
# Optional overrides (uncomment as needed):
# llm: openai/gpt-4o
# max_iter: 15
# max_rpm: 10
# allow_delegation: false
# verbose: truecrew.py@CrewBase
class MyCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config["researcher"],
tools=[SerperDevTool()],
)def researcherresearcher:KeyErrorAgent.kickoff()from crewai import Agent
from crewai_tools import SerperDevTool
researcher = Agent(
role="Senior Research Analyst",
goal="Find comprehensive, factual information with source citations",
backstory="Expert researcher known for thorough, evidence-based analysis.",
tools=[SerperDevTool()],
llm="openai/gpt-4o",
)
# Pass a string prompt — the agent reasons, uses tools, and returns a result
result = researcher.kickoff("What are the latest developments in quantum computing?")
print(result.raw) # str — the agent's full response
print(result.usage_metrics) # token usage statsfrom pydantic import BaseModel
class ResearchFindings(BaseModel):
    """Structured output schema passed to kickoff() via response_format."""

    key_trends: list[str]  # headline trends identified by the agent
    sources: list[str]  # citations backing the findings
    confidence: float  # agent's self-reported confidence — not validated here
result = researcher.kickoff(
"Research the latest AI agent frameworks",
response_format=ResearchFindings,
)
# Access via .pydantic (NOT directly — Agent.kickoff wraps the result)
print(result.pydantic.key_trends) # list[str]
print(result.pydantic.confidence) # float
print(result.raw) # raw string version

Note: `Agent.kickoff()` returns `LiteAgentOutput` — access structured output via `result.pydantic`, which returns the Pydantic object directly. This differs from `LLM.call()`.
result = researcher.kickoff(
"Analyze this document and summarize the key findings",
input_files={"document": FileInput(path="report.pdf")},
)result = await researcher.kickoff_async(
"Research quantum computing breakthroughs",
response_format=ResearchFindings,
)Agent.kickoff()from crewai import Agent
from crewai.flow.flow import Flow, listen, start
from crewai_tools import SerperDevTool, ScrapeWebsiteTool
from pydantic import BaseModel
class ResearchState(BaseModel):
    """Typed flow state; each pipeline step fills in one field."""

    topic: str = ""  # input topic, supplied via flow.kickoff(inputs={...})
    research: str = ""  # raw research text (written by gather_data)
    analysis: str = ""  # extracted insights (written by analyze)
    report: str = ""  # final report (written by write_report)
class ResearchFlow(Flow[ResearchState]):
    """Three-step research pipeline: gather -> analyze -> write.

    Each step constructs a single-purpose Agent and calls Agent.kickoff()
    directly (no Crew); results are handed to the next step through the
    typed flow state.
    """

    @start()
    def gather_data(self):
        # Entry point: researches the topic set via flow.kickoff(inputs=...).
        researcher = Agent(
            role="Senior Researcher",
            goal="Find comprehensive data with sources",
            backstory="Expert at finding and validating information.",
            tools=[SerperDevTool(), ScrapeWebsiteTool()],  # search + scrape
        )
        result = researcher.kickoff(f"Research: {self.state.topic}")
        self.state.research = result.raw  # raw text handed to analyze()

    @listen(gather_data)
    def analyze(self):
        # Runs after gather_data; no tools — pure reasoning over the text.
        analyst = Agent(
            role="Data Analyst",
            goal="Extract actionable insights from raw research",
            backstory="Skilled at pattern recognition and synthesis.",
        )
        result = analyst.kickoff(
            f"Analyze this research and extract key insights:\n\n{self.state.research}"
        )
        self.state.analysis = result.raw

    @listen(analyze)
    def write_report(self):
        # Final step: turns the analysis into a reader-facing report.
        writer = Agent(
            role="Report Writer",
            goal="Create clear, well-structured reports",
            backstory="Technical writer who makes complex topics accessible.",
        )
        result = writer.kickoff(
            f"Write a comprehensive report from this analysis:\n\n{self.state.analysis}"
        )
        self.state.report = result.raw  # read by the caller via flow.state.report
flow = ResearchFlow()
flow.kickoff(inputs={"topic": "AI agents"})
print(flow.state.report)Agent.kickoff()Crew.kickoff()technical_writercopywritereditorResearcher → Writer → EditorCrew(
agents=[researcher, writer, editor],
tasks=[research_task, writing_task, editing_task],
process=Process.hierarchical,
manager_llm="openai/gpt-4o",
)allow_delegation=Truelead_researcher = Agent(
role="Lead Researcher",
goal="Coordinate research efforts",
backstory="...",
allow_delegation=True, # Can delegate to other agents in the crew
)| Mistake | Impact | Fix |
|---|---|---|
| Generic role like "Assistant" | Agent produces unfocused, shallow output | Use specific expertise: "Senior Financial Analyst" |
| No tools for data-gathering tasks | Agent hallucinates data instead of searching | Always add tools when the task requires external info |
| Too many tools (10+) | Agent gets confused choosing between tools | Limit to 3-5 relevant tools per agent |
| Backstory full of task instructions | Agent mixes personality with task execution | Keep backstory about WHO the agent is; task details go in the task |
| `allow_delegation=True` everywhere | Agents waste iterations delegating trivially | Only enable when delegation genuinely helps |
| max_iter too high for simple tasks | Agent loops unnecessarily on vague tasks | Lower max_iter; fix the task description instead |
| No guardrail on critical output | Bad output passes through unchecked | Add guardrails for outputs that feed into production systems |
| Using expensive LLM for tool calls | Unnecessary cost for mechanical operations | Set `function_calling_llm` to a cheaper model |
@toolBaseTool