# Claude Tool Use

Tool use patterns for Claude including schema design, tool_choice modes, result handling, parallel execution, error recovery, and extended thinking integration.

npx skill4agent add lobbi-docs/claude tool-use

Tool names must match the pattern `^[a-zA-Z0-9_-]{1,64}$`.

{
"name": "get_stock_price",
"description": "Retrieves the current stock price for a given ticker symbol. The ticker symbol must be a valid symbol for a publicly traded company on a major US stock exchange like NYSE or NASDAQ. The tool will return the latest trade price in USD. Use this when the user asks about the current or most recent price of a specific stock. It will not provide any other information about the stock or company beyond the price.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc. Must be uppercase."
},
"include_historical": {
"type": "boolean",
"description": "Optional. Whether to include 52-week high/low prices.",
"default": false
}
},
"required": ["ticker"],
"additionalProperties": false
},
"input_examples": [
{"ticker": "AAPL"},
{"ticker": "MSFT", "include_historical": true},
{"ticker": "GOOGL"}
]
}

The `input_examples` field gives Claude sample tool inputs; `additionalProperties: false` rejects unexpected keys.

| Mode | Behavior | Use Case |
|---|---|---|
| `auto` | Claude decides to use tools or not (default) | General tool use, letting Claude decide |
| `any` | Claude must use one tool but can choose which | Forcing tool use without specific tool |
| `tool` | Force specific tool (e.g., `{"type": "tool", "name": "..."}`) | Structured JSON output, specific tool required |
| `none` | Prevent all tool use | Normal text-only responses |
# Allow Claude to decide
# tool_choice="auto" (default)
# Force any tool to be used
tool_choice={"type": "any"}
# Force specific tool (useful for JSON output)
tool_choice={"type": "tool", "name": "record_summary"}
# Prevent tool use
tool_choice={"type": "none"}

`tool_choice` accepts `{"type": "auto"}` (default), `{"type": "any"}`, `{"type": "tool"}`, and `{"type": "none"}`. For guaranteed schema conformance, enable `strict: true`:

tools=[{
"name": "search_flights",
"strict": True, # Enable strict mode
"input_schema": {
"type": "object",
"properties": {
"destination": {"type": "string"},
"departure_date": {"type": "string", "format": "date"},
"passengers": {"type": "integer"}
},
"required": ["destination", "departure_date"],
"additionalProperties": False
}
}]

Strict mode requires the `structured-outputs-2025-11-13` beta header.

import anthropic
client = anthropic.Anthropic()
response = client.messages.create(
model="claude-sonnet-4-5",
max_tokens=1024,
tools=[{
"name": "get_weather",
"description": "Get current weather in a location",
"input_schema": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City and state, e.g. San Francisco, CA"
}
},
"required": ["location"]
}
}],
messages=[{"role": "user", "content": "What's the weather in SF?"}]
)
# Check if Claude wants to use tool
if response.stop_reason == "tool_use":
tool_use = next(b for b in response.content if b.type == "tool_use")
print(f"Tool: {tool_use.name}")
print(f"Input: {tool_use.input}")
# Execute tool (simulate)
tool_result = "68°F, partly cloudy"
# Return result to Claude
response = client.messages.create(
model="claude-sonnet-4-5",
max_tokens=1024,
tools=[...], # Same tools
messages=[
{"role": "user", "content": "What's the weather in SF?"},
{"role": "assistant", "content": response.content},
{"role": "user", "content": [{
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": tool_result
}]}
]
)
print(response.content[0].text)

# User asks: "What's the weather where I am?"
# Flow: get_location() → get_weather(location)
#
# Sequential chaining: each tool result is appended to the history and the
# conversation is re-sent, so Claude can decide the next call from the
# previous result. Distinct variables (response1, response2) are required:
# the final request must replay BOTH assistant turns verbatim, so neither
# may be overwritten before the history is assembled.
response1 = client.messages.create(
    model="claude-sonnet-4-5",
    max_tokens=1024,
    tools=[
        {
            "name": "get_location",
            "description": "Get user's location from IP address",
            "input_schema": {"type": "object", "properties": {}}
        },
        {
            "name": "get_weather",
            "description": "Get weather for location",
            "input_schema": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"}
                },
                "required": ["location"]
            }
        }
    ],
    messages=[{"role": "user", "content": "What's the weather where I am?"}]
)
# Claude calls get_location first
tool_use = next(b for b in response1.content if b.type == "tool_use")
location_result = "San Francisco, CA"  # simulated tool execution
# Send location result back
response2 = client.messages.create(
    model="claude-sonnet-4-5",
    max_tokens=1024,
    tools=[...],
    messages=[
        {"role": "user", "content": "What's the weather where I am?"},
        {"role": "assistant", "content": response1.content},
        {"role": "user", "content": [{
            "type": "tool_result",
            "tool_use_id": tool_use.id,
            "content": location_result
        }]}
    ]
)
# Claude now calls get_weather with the location
tool_use2 = next(b for b in response2.content if b.type == "tool_use")
weather_result = "68°F, sunny"  # simulated tool execution
# Final result: the history interleaves each assistant tool-use turn with
# its matching tool_result. (Reusing one overwritten `response` variable
# here would put the second turn's content in both assistant slots and
# drop the first turn entirely.)
response = client.messages.create(
    model="claude-sonnet-4-5",
    max_tokens=1024,
    tools=[...],
    messages=[
        {"role": "user", "content": "What's the weather where I am?"},
        {"role": "assistant", "content": response1.content},
        {"role": "user", "content": [{
            "type": "tool_result",
            "tool_use_id": tool_use.id,
            "content": location_result
        }]},
        {"role": "assistant", "content": response2.content},
        {"role": "user", "content": [{
            "type": "tool_result",
            "tool_use_id": tool_use2.id,
            "content": weather_result
        }]}
    ]
)
print(response.content[0].text)

response = client.messages.create(
model="claude-sonnet-4-5",
max_tokens=1024,
tools=[...],
messages=[{
"role": "user",
"content": "What's the weather in SF and NYC? What time is it there?"
}]
)
# Claude makes 4 parallel tool calls (2 weather + 2 time)
tool_uses = [b for b in response.content if b.type == "tool_use"]
print(f"Parallel calls: {len(tool_uses)}") # 4
# Execute all tools and collect results
tool_results = []
for tool_use in tool_uses:
if tool_use.name == "get_weather":
if "San Francisco" in str(tool_use.input):
result = "68°F, partly cloudy"
else:
result = "45°F, clear"
else: # get_time
if "Los_Angeles" in str(tool_use.input):
result = "2:30 PM PST"
else:
result = "5:30 PM EST"
tool_results.append({
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": result
})
# IMPORTANT: Return all results in ONE user message
response = client.messages.create(
model="claude-sonnet-4-5",
max_tokens=1024,
tools=[...],
messages=[
{"role": "user", "content": "What's the weather in SF and NYC? What time is it there?"},
{"role": "assistant", "content": response.content},
{"role": "user", "content": tool_results} # All results together!
]
)
print(response.content[0].text)

{
"type": "tool_result",
"tool_use_id": "toolu_01A09q90qw90lq917835lq9",
"content": "15 degrees"
}

{
"type": "tool_result",
"tool_use_id": "toolu_01A09q90qw90lq917835lq9",
"content": [
{"type": "text", "text": "Current weather screenshot:"},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg..."
}
}
]
}

{
"type": "tool_result",
"tool_use_id": "toolu_01A09q90qw90lq917835lq9",
"content": [
{"type": "text", "text": "Document content:"},
{
"type": "document",
"source": {
"type": "text",
"media_type": "text/plain",
"data": "Full document content here"
}
}
]
}

# Tool execution error
{
"type": "tool_result",
"tool_use_id": "toolu_01A09q90qw90lq917835lq9",
"content": "ConnectionError: Weather API is unavailable (HTTP 500)",
"is_error": True
}
# Missing parameter error
{
"type": "tool_result",
"tool_use_id": "toolu_01A09q90qw90lq917835lq9",
"content": "Error: Missing required 'location' parameter",
"is_error": True
}

if tool_execution_failed:
tool_result = {
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": f"Error: {error_message}",
"is_error": True
}

# With strict: true, Claude CANNOT send invalid parameters
# Invalid type: "passengers": "two" → prevented by schema
# Missing required field → prevented by schema
# Type mismatch: int vs string → prevented by schema

if response.stop_reason == "max_tokens":
last_block = response.content[-1]
if last_block.type == "tool_use":
# Incomplete tool use, retry with more tokens
response = client.messages.create(
model="claude-sonnet-4-5",
max_tokens=4096, # Increased
messages=messages,
tools=tools
    )

response = client.beta.messages.create(
model="claude-sonnet-4-5",
max_tokens=1024,
betas=["structured-outputs-2025-11-13"],
tools=[{
"name": "record_summary",
"description": "Record structured image summary",
"input_schema": {
"type": "object",
"properties": {
"key_colors": {
"type": "array",
"items": {
"type": "object",
"properties": {
"r": {"type": "number"},
"g": {"type": "number"},
"b": {"type": "number"},
"name": {"type": "string"}
},
"required": ["r", "g", "b", "name"]
}
},
"description": {"type": "string"},
"estimated_year": {"type": "integer"}
},
"required": ["key_colors", "description"]
}
}],
tool_choice={"type": "tool", "name": "record_summary"},
messages=[{
"role": "user",
"content": [
{"type": "image", "source": {"type": "url", "url": "https://..."}},
{"type": "text", "text": "Describe this image"}
]
}]
)
# Extract structured output from tool use input
tool_use = next(b for b in response.content if b.type == "tool_use")
structured_data = tool_use.input

<use_parallel_tool_calls>
For maximum efficiency, whenever you perform multiple independent operations,
invoke all relevant tools simultaneously rather than sequentially. Prioritize
calling tools in parallel whenever possible. When reading 3 files, run 3 tool
calls in parallel. When running multiple commands like 'ls' or 'list_dir',
always run all commands in parallel.
</use_parallel_tool_calls>def measure_parallel_efficiency(messages):
# Find assistant messages with tool use
tool_call_messages = [
msg for msg in messages
if msg.get("role") == "assistant"
and any(b.get("type") == "tool_use" for b in msg.get("content", []))
]
total_tools = sum(
len([b for b in msg.get("content", []) if b.get("type") == "tool_use"])
for msg in tool_call_messages
)
if not tool_call_messages:
return 0
avg_per_message = total_tools / len(tool_call_messages)
print(f"Average tools per message: {avg_per_message}")
# > 1.0 indicates parallel tool use working# ✅ ALLOWED with extended thinking
response = client.messages.create(
model="claude-opus-4-5",
thinking={"type": "enabled", "budget_tokens": 2048},
tools=[...],
tool_choice={"type": "auto"}, # Default
messages=[...]
)
# ✅ ALLOWED with extended thinking
tool_choice={"type": "none"} # No tools
# ❌ NOT ALLOWED with extended thinking
# tool_choice={"type": "any"} → Error
# tool_choice={"type": "tool", "name": "..."} → Error

tools=[{
"name": "think",
"description": "Pause and think carefully before proceeding",
"input_schema": {
"type": "object",
"properties": {
"reasoning": {
"type": "string",
"description": "Your detailed reasoning"
}
},
"required": ["reasoning"]
}
}, {
"name": "get_weather",
"description": "Get weather information",
"input_schema": {...}
}]import anthropic
from anthropic import beta_tool
client = anthropic.Anthropic()
# NOTE(review): @beta_tool appears to derive the tool's name, description, and
# input schema from this signature and docstring — keep both accurate; confirm
# against the Anthropic Python SDK docs before editing them.
@beta_tool
def get_weather(location: str, unit: str = "fahrenheit") -> str:
    """Get current weather in a location.
    Args:
        location: City and state, e.g. San Francisco, CA
        unit: Temperature unit, either 'celsius' or 'fahrenheit'
    """
    # Tool implementation (stub: returns a fixed JSON string, ignores inputs)
    return '{"temperature": "20°C", "condition": "Sunny"}'
# Tool runner automatically handles tool execution loop
runner = client.beta.messages.tool_runner(
model="claude-sonnet-4-5",
max_tokens=1024,
tools=[get_weather],
messages=[{"role": "user", "content": "What's the weather in Paris?"}]
)
# Iterate through responses
for message in runner:
print(message.content[0].text)
# Or get final message directly
final_message = runner.until_done()

Supported JSON Schema keywords include `enum`, `const`, `anyOf`, `allOf`, `$ref`, `$defs`/`definitions`, and `minItems`; prefer `additionalProperties: false` on object schemas.

tools=[{
"name": "extract_info",
"description": "Extract structured data from text",
"input_schema": {
"type": "object",
"properties": {
"name": {"type": "string"},
"email": {"type": "string"},
"company": {"type": "string"}
},
"required": ["name", "email"]
}
}]

tools=[{
"name": "search_api",
"description": "Search external API",
"input_schema": {
"type": "object",
"properties": {
"query": {"type": "string"},
"limit": {"type": "integer", "minimum": 1, "maximum": 100}
},
"required": ["query"]
}
}]

tools=[
{"name": "validate_input", ...},
{"name": "process_data", ...},
{"name": "save_result", ...}
]
# Claude orchestrates the workflow

Forcing tool use with `tool_choice`:

# Force use of a tool (for JSON output)
response = client.messages.create(
model="claude-sonnet-4-5",
max_tokens=1024,
tools=[sentiment_tool],
tool_choice={"type": "tool", "name": "sentiment_tool"},
messages=[{"role": "user", "content": "Analyze this text..."}]
)
# No prefilled explanations with forced tool_choice
# Claude goes straight to tool use
# For explanations WITH tool use, use tool_choice="auto" (default)
# and add instruction: "Use the sentiment_tool in your response"

Possible `stop_reason` values when tools are involved: `tool_use`, `end_turn`, `max_tokens`, `pause_turn`.