# Google ADK (Agent Development Kit)
This skill should be used when the user asks to "build an agent with Google ADK", "use the Agent Development Kit", "create a Google ADK agent", "set up ADK tools", or needs guidance on Google's Agent Development Kit best practices, multi-agent systems, or agent evaluation.
```bash
npx skill4agent add the-perfect-developer/the-perfect-opencode google-adk
```

## Defining an `LlmAgent`

The core agent class is `LlmAgent` (also importable as `Agent`):

```python
from google.adk.agents import LlmAgent

agent = LlmAgent(
    name="research_agent",  # unique, snake_case
    model="gemini-2.5-flash",
    description="Searches and summarizes research papers.",  # used for multi-agent routing
    instruction="You are a research assistant. ...",  # most critical field
    tools=[search_tool, summarize_tool],
)
```

| Field | Purpose |
|---|---|
| `name` | Unique identifier; used for agent transfer |
| `description` | Shown to parent agents for routing decisions |
| `model` | Gemini model string (e.g. `gemini-2.5-flash`) |
| `instruction` | System prompt — the most critical field |
| `tools` | List of callable tools or `BaseTool` instances |
| `output_key` | Write agent response to `session.state` |
| `output_schema` | Pydantic model for structured JSON output |
## Writing Instructions

Instructions may reference tools by name (e.g. `search_tool`), inject session state with `{state_key}`, and reference artifacts with `{artifact.name}`:

instruction="""
You are a customer support agent for Acme Corp.
## Behavior
- Greet the user by name using {user_name}
- For billing questions, always use `lookup_invoice` before responding
- Escalate to human if sentiment is negative three times in a row
## Examples
User: "What's my balance?"
Action: Call lookup_invoice(account_id="{account_id}")
""",output_schemafrom pydantic import BaseModel
class Report(BaseModel):
title: str
summary: str
confidence: float
agent = LlmAgent(
...,
output_schema=Report,
output_key="report", # writes JSON to session.state["report"]
)

Note: an agent configured with `output_schema` cannot also use `tools` — the two are mutually exclusive.

## Designing Function Tools

def get_weather(city: str, units: str = "celsius") -> dict:
"""Get current weather for a city.
Args:
city: The city name to look up.
units: Temperature units, either 'celsius' or 'fahrenheit'.
Returns:
dict with keys: temperature, condition, humidity.
"""
# implementation ...
return {"temperature": 22, "condition": "sunny", "humidity": 60}Optional[T] = Nonedict"status""success""error"*args**kwargssession.statetemp:from google.adk.tools import ToolContext
def store_result(data: str, tool_context: ToolContext) -> dict:
"""Store intermediate result for downstream tools."""
tool_context.state["temp:last_result"] = data
return {"status": "success"}
def read_result(tool_context: ToolContext) -> dict:
"""Read the stored intermediate result."""
value = tool_context.state.get("temp:last_result", "")
return {"status": "success", "result": value}from google.adk.tools import LongRunningFunctionTool, AgentTool
# Wrap async/long-running operations
slow_tool = LongRunningFunctionTool(func=run_batch_job)
# Invoke a sub-agent as an explicit tool call
sub_agent_tool = AgentTool(agent=specialist_agent)

## Multi-Agent Delegation with `sub_agents`

orchestrator = LlmAgent(
name="orchestrator",
model="gemini-2.5-flash",
instruction="Route tasks to the appropriate specialist.",
sub_agents=[research_agent, writer_agent, reviewer_agent],
)

## `SequentialAgent` Pipelines

Chain agents by writing each step's response with `output_key` and reading it in the next step's instruction via `{state_key}`:

from google.adk.agents import SequentialAgent
pipeline = SequentialAgent(
name="report_pipeline",
sub_agents=[
LlmAgent(name="researcher", ..., output_key="research_notes"),
LlmAgent(name="writer",
instruction="Write a report based on: {research_notes}",
output_key="draft"),
LlmAgent(name="reviewer",
instruction="Review this draft: {draft}"),
],
)

## `ParallelAgent` Fan-Out

Run sub-agents concurrently; give each a distinct `output_key` so results do not collide in state:

from google.adk.agents import ParallelAgent
parallel = ParallelAgent(
name="multi_search",
sub_agents=[
LlmAgent(name="web_searcher", ..., output_key="web_results"),
LlmAgent(name="doc_searcher", ..., output_key="doc_results"),
LlmAgent(name="db_searcher", ..., output_key="db_results"),
],
)

## `LoopAgent` Iteration

A `LoopAgent` repeats its sub-agents until `max_iterations` is reached or a sub-agent escalates (`escalate=True`):

from google.adk.agents import LoopAgent
refiner = LoopAgent(
name="refinement_loop",
max_iterations=5,
sub_agents=[draft_agent, critic_agent],
)

## Session State

LLM-driven hand-off uses `transfer_to_agent(agent_name="...")`; the model picks a target based on each sub-agent's `description`. Session state is a `dict` whose key prefixes control scope:

| Prefix | Scope | Example |
|---|---|---|
| (none) | Persistent across session | `state["last_query"]` |
| `temp:` | Current turn only | `temp:last_result` |
| `user:` | User-level across sessions | `user:verified` |
| `app:` | Application-level global | `app:api_endpoint` |
Read and write state from tools via `ToolContext`, and inject values into instructions with `{state_key}` templating. `ToolContext` also enables state-based guards:

def sensitive_lookup(query: str, tool_context: ToolContext) -> dict:
"""Look up sensitive records."""
if not tool_context.state.get("user:verified"):
return {"status": "error", "message": "User not verified."}
# proceed with lookup ...

## Guardrails with `before_tool_callback`

from google.adk.tools import ToolContext
def validate_args(tool_name: str, args: dict, tool_context: ToolContext):
if tool_name == "delete_record" and not args.get("confirm"):
raise ValueError("delete_record requires confirm=True")
agent = LlmAgent(..., before_tool_callback=validate_args)

## Model Settings via `generate_content_config`

from google.genai.types import GenerateContentConfig, SafetySetting, HarmCategory, HarmBlockThreshold
agent = LlmAgent(
...,
generate_content_config=GenerateContentConfig(
temperature=0.2,
max_output_tokens=2048,
safety_settings=[
SafetySetting(
category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
),
)

## Evaluation

| Format | File | Use |
|---|---|---|
| Unit tests | `*.test.json` | Single-turn, deterministic assertions |
| Integration tests | `*.evalset.json` | Multi-turn conversation flows |
# Launch interactive web UI
adk web
# CLI evaluation
adk eval path/to/agent path/to/tests.evalset.json
# pytest integration
pytest tests/ -k "eval"

| Metric | Description |
|---|---|
| `tool_trajectory_avg_score` | Exact match on tool call sequence |
| `response_match_score` | ROUGE-1 similarity to expected response |
| `final_response_match_v2` | LLM-based semantic match |
| `hallucinations_v1` | Detects fabricated facts |
| `safety_v1` | Flags safety violations |
## Quick Start

pip install google-adk

from google.adk.agents import LlmAgent
agent = LlmAgent(
name="my_agent",
model="gemini-2.5-flash",
instruction="You are a helpful assistant.",
)

adk web # web UI
adk run # CLI interactive
adk api_server # REST API server

## Planners

Use `BuiltInPlanner` or `PlanReActPlanner` to add explicit planning steps to an agent's reasoning.

## References

- `references/agent-design.md`
- `references/tools-and-sessions.md`
- `references/safety-and-evaluation.md`