Loading...
Loading...
Build voice agents with the Cartesia Line SDK. Supports 100+ LLM providers via LiteLLM with tool calling, multi-agent handoffs, and real-time interruption handling.
npx skill4agent add cartesia-ai/skills line-voice-agent

Architecture (cartesia platform):

┌─────────────────────────────────────────────────────────────────┐
│ Cartesia Line Platform │
│ ┌──────────┐ ┌──────────────┐ ┌──────────┐ │
│ │ Ink │───▶│ Your Agent │───▶│ Sonic │ │
│ │ (STT) │ │ (Line SDK) │ │ (TTS) │ │
│ └──────────┘ └──────────────┘ └──────────┘ │
│ ▲ │ │
│ │ Audio Orchestration │ │
│ └────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────┘
▲ │
│ WebSocket ▼
┌───────┴────────────────────────────────────┴───────┐
│ Client (Phone / Web / Mobile) │
└─────────────────────────────────────────────────────┘

Provider API keys: ANTHROPIC_API_KEY, OPENAI_API_KEY, GEMINI_API_KEY

Install the CLI:

curl -fsSL https://cartesia.sh | sh

# Authentication
cartesia auth login # Login with Cartesia API key
cartesia auth status # Check auth status
# Project Setup
cartesia create [project-name] # Create project from template
cartesia init # Link existing directory to an agent
# Local Development
cartesia chat <port> # Chat with local agent (text mode)
# Deployment
cartesia deploy # Deploy to Cartesia cloud
cartesia status # Check deployment status
# Environment Variables (encrypted, stored on Cartesia)
cartesia env set KEY=VALUE # Set a single env var
cartesia env set --from .env # Import all vars from .env file
cartesia env rm <name> # Remove an env var
# Agents & Calls
cartesia agents ls # List all agents
cartesia deployments ls # List deployments
cartesia call <phone> [agent-id] # Make outbound call

cartesia auth login
cartesia create my-agent
cd my-agent

main.py:

import os
from line.llm_agent import LlmAgent, LlmConfig, end_call
from line.voice_agent_app import AgentEnv, CallRequest, VoiceAgentApp
async def get_agent(env: AgentEnv, call_request: CallRequest):
    """Factory called by VoiceAgentApp for each incoming call.

    Returns an LlmAgent backed by Anthropic (via a LiteLLM model
    string) with the built-in end_call tool enabled.
    """
    return LlmAgent(
        model="anthropic/claude-haiku-4-5-20251001",  # LiteLLM "provider/model" string
        api_key=os.getenv("ANTHROPIC_API_KEY"),  # provider key from environment
        tools=[end_call],  # lets the LLM end the call gracefully
        config=LlmConfig(
            system_prompt="You are a helpful voice assistant.",
            introduction="Hello! How can I help you today?",  # agent speaks first
        ),
    )
app = VoiceAgentApp(get_agent=get_agent)
if __name__ == "__main__":
    app.run()

Run locally:

ANTHROPIC_API_KEY=your-key python main.py
cartesia chat 8000 # Text chat with your running agent

cartesia env set ANTHROPIC_API_KEY=your-key # Encrypted, stored on Cartesia
cartesia deploy
cartesia status # Verify deployment is active

cartesia call +1234567890 # Outbound call via CLI

Project layout:

my_agent/
├── main.py # VoiceAgentApp entry point (REQUIRED)
├── cartesia.toml # Deployment config, created by cartesia init or cartesia create (REQUIRED)
└── pyproject.toml # Dependencies: cartesia-line

from line.llm_agent import LlmAgent, LlmConfig
agent = LlmAgent(
model="gemini/gemini-2.5-flash-preview-09-2025", # LiteLLM model string
api_key=os.getenv("GEMINI_API_KEY"), # Provider API key
tools=[end_call, my_custom_tool], # List of tools
config=LlmConfig(...), # Agent configuration
max_tool_iterations=10, # Max tool call loops (default: 10)
)

from line.llm_agent import LlmConfig
config = LlmConfig(
# Agent behavior
system_prompt="You are a helpful assistant.",
introduction="Hello! How can I help?", # Set to "" to wait for user first
# Sampling parameters (optional)
temperature=0.7,
max_tokens=1024,
top_p=0.9,
# Resilience (optional)
num_retries=2,
timeout=30.0,
fallbacks=["gpt-4o-mini"], # Fallback models
)

Use LlmConfig.from_call_request() to take prompt/introduction from the call request:

async def get_agent(env: AgentEnv, call_request: CallRequest):
return LlmAgent(
model="anthropic/claude-sonnet-4-20250514",
api_key=os.getenv("ANTHROPIC_API_KEY"),
tools=[end_call],
config=LlmConfig.from_call_request(
call_request,
fallback_system_prompt="Default system prompt if not in request.",
fallback_introduction="Default introduction if not in request.",
temperature=0.7, # Additional LlmConfig options
),
)

from line.voice_agent_app import VoiceAgentApp, AgentEnv, CallRequest
async def get_agent(env: AgentEnv, call_request: CallRequest):
    # Context available to the factory:
    # env.loop - asyncio event loop
    # call_request.call_id - unique call identifier
    # call_request.agent.system_prompt - from request
    # call_request.agent.introduction - from request
    # call_request.metadata - custom metadata dict
    return LlmAgent(...)
app = VoiceAgentApp(get_agent=get_agent)
app.run(host="0.0.0.0", port=8000)

Built-in tools (line.llm_agent):

from line.llm_agent import end_call, send_dtmf, transfer_call, web_search

tools=[end_call]
# System prompt: "Say goodbye before ending the call with end_call."

tools=[send_dtmf]
# Buttons: "0"-"9", "*", "#" (strings, not integers!)

tools=[transfer_call]
# Example: +14155551234

# Default settings
tools=[web_search]
# Custom settings
tools=[web_search(search_context_size="high")] # "low", "medium", "high"

| Type | Decorator | Use Case | Result Handling |
|---|---|---|---|
| Loopback | @loopback_tool | API calls, database lookups | Result sent back to LLM |
| Passthrough | @passthrough_tool | End call, transfer, DTMF | Bypasses LLM, goes to user |
| Handoff | @handoff_tool | Multi-agent workflows | Transfers control to another agent |
Does the result need LLM processing?
├─ YES → @loopback_tool
│ └─ Is it long-running (>1s)? → @loopback_tool(is_background=True)
│ └─ Yield interim status, then final result
├─ NO, deterministic action → @passthrough_tool
│ └─ Yields OutputEvent objects directly (AgentSendText, AgentEndCall, etc.)
└─ Transfer to another agent → @handoff_tool or agent_as_handoff()

from typing import Annotated
from line.llm_agent import loopback_tool, ToolEnv
@loopback_tool
async def get_order_status(
ctx: ToolEnv,
order_id: Annotated[str, "The order ID to look up"],
) -> str:
"""Look up the current status of an order."""
order = await db.get_order(order_id)
    return f"Order {order_id} status: {order.status}, ETA: {order.eta}"

Every tool takes ctx: ToolEnv as its first parameter. Describe each parameter with Annotated[type, "description"]; optional parameters get a default value (or Optional[T]).

@loopback_tool
async def search_products(
ctx: ToolEnv,
query: Annotated[str, "Search query"],
category: Annotated[str, "Product category"] = "all", # Optional with default
limit: Annotated[int, "Max results"] = 10,
) -> str:
"""Search the product catalog."""
    ...

from line.events import AgentSendText, AgentTransferCall
from line.llm_agent import passthrough_tool, ToolEnv
@passthrough_tool
async def transfer_to_support(
ctx: ToolEnv,
reason: Annotated[str, "Reason for transfer"],
):
"""Transfer the call to the support team."""
yield AgentSendText(text="Let me transfer you to our support team now.")
    yield AgentTransferCall(target_phone_number="+18005551234")

Output events (line.events): AgentSendText(text="..."), AgentEndCall(), AgentTransferCall(target_phone_number="+1..."), AgentSendDtmf(button="5")

Recommended models: gemini/gemini-2.5-flash-preview-09-2025, anthropic/claude-haiku-4-5-20251001, gpt-4o-mini, anthropic/claude-opus-4-5, gpt-4o

| Provider | Format | Example |
|---|---|---|
| OpenAI | model-name | gpt-4o |
| Anthropic | anthropic/model-name | anthropic/claude-opus-4-5 |
| Google Gemini | gemini/model-name | gemini/gemini-2.5-flash-preview-09-2025 |
| Azure OpenAI | azure/deployment-name | azure/my-gpt4o-deployment |
Provider env vars: OPENAI_API_KEY, ANTHROPIC_API_KEY, GEMINI_API_KEY, AZURE_API_KEY

from typing import Annotated
from line.llm_agent import LlmAgent, LlmConfig, loopback_tool, end_call, ToolEnv
@loopback_tool
async def check_appointment(
    ctx: ToolEnv,
    date: Annotated[str, "Date in YYYY-MM-DD format"],
) -> str:
    """Report which appointment slots are open on the given date."""
    open_slots = await calendar.get_available_slots(date)
    slot_list = ", ".join(open_slots)
    return f"Available slots on {date}: {slot_list}"
@loopback_tool
async def book_appointment(
    ctx: ToolEnv,
    date: Annotated[str, "Date in YYYY-MM-DD format"],
    time: Annotated[str, "Time in HH:MM format"],
    name: Annotated[str, "Customer name"],
) -> str:
    """Reserve a slot on the calendar and report the confirmation ID."""
    booking = await calendar.book(date, time, name)
    return (
        f"Appointment booked for {name} on {date} at {time}. "
        f"Confirmation: {booking.id}"
    )
async def get_agent(env: AgentEnv, call_request: CallRequest):
return LlmAgent(
model="gemini/gemini-2.5-flash-preview-09-2025",
api_key=os.getenv("GEMINI_API_KEY"),
tools=[check_appointment, book_appointment, end_call],
config=LlmConfig(
system_prompt="""You are an appointment scheduling assistant.
Help users check availability and book appointments.
Always confirm the booking details before finalizing.""",
introduction="Hi! I can help you schedule an appointment. What date works for you?",
),
    )

To wait for the caller to speak first, set introduction="":

config=LlmConfig(
system_prompt="You are a helpful assistant.",
introduction="", # Empty string = wait for user
)

@loopback_tool
async def record_answer(
ctx: ToolEnv,
answer: Annotated[str, "The user's answer"],
) -> dict:
"""Record an answer to the current question."""
# Process and validate answer
# Return next question or completion status
    return {"next_question": "What is your email?", "is_complete": False}

Always include end_call in your tools list. Return errors to the LLM as strings rather than raising:

# BAD
raise ValueError("Invalid order ID")
# GOOD
return "I couldn't find that order. Please check the ID and try again."

Name the tool-context parameter ctx and type it as ToolEnv:

# GOOD
@loopback_tool
async def my_tool(ctx: ToolEnv, order_id: Annotated[str, "Order ID"]): ...

Handoff tools must accept a trailing event parameter named event:

# GOOD
@handoff_tool
async def my_handoff(ctx: ToolEnv, param: Annotated[str, "desc"], event): ...

# GOOD
async def my_tool(ctx, order_id: Annotated[str, "The order ID to look up"]): ...

For long-running tools, set is_background=True and yield interim status:

@loopback_tool(is_background=True)
async def slow_search(ctx: ToolEnv, query: Annotated[str, "Query"]):
yield "Searching..." # Immediate feedback
result = await slow_operation()
    yield result

Wrap blocking synchronous calls with asyncio.to_thread():

result = await asyncio.to_thread(sync_api_call, params)

# Core
from line.llm_agent import LlmAgent, LlmConfig
from line.voice_agent_app import VoiceAgentApp, AgentEnv, CallRequest
# Built-in tools
from line.llm_agent import end_call, send_dtmf, transfer_call, web_search
# Tool decorators
from line.llm_agent import loopback_tool, passthrough_tool, handoff_tool
# Tool context
from line.llm_agent import ToolEnv
# Multi-agent
from line.llm_agent import agent_as_handoff
# Events (for passthrough/handoff tools and custom agents)
from line.events import (
AgentSendText,
AgentEndCall,
AgentTransferCall,
AgentSendDtmf,
AgentUpdateCall,
)

Examples: examples/basic_chat/main.py, examples/form_filler/, examples/chat_supervisor/main.py, examples/transfer_agent/main.py, examples/echo/tools.py