# LiveKit Python Agents

Build LiveKit Agent backends in Python. Use this skill when creating voice AI agents, voice assistants, or any realtime AI application using LiveKit's Python Agents SDK (livekit-agents). Covers AgentSession, the Agent class, function tools, STT/LLM/TTS models, turn detection, and multi-agent workflows.
## Installation

Add the skill:

    npx skill4agent add codestackr/livekit-skills agents-py

Provided tools: `docs_search`, `get_pages`, `get_changelog`, `code_search`, `get_python_agent_example`.

Install dependencies:

    uv add "livekit-agents[silero,turn-detector]~=1.3" \
        "livekit-plugins-noise-cancellation~=0.2" \
        "python-dotenv"

Create `.env.local` (or run `lk app env -w .env.local`) with:

    LIVEKIT_API_KEY=your_api_key
    LIVEKIT_API_SECRET=your_api_secret
    LIVEKIT_URL=wss://your-project.livekit.cloud

## Example: voice pipeline agent

from dotenv import load_dotenv
from livekit import agents, rtc
from livekit.agents import AgentSession, Agent, AgentServer, room_io
from livekit.plugins import noise_cancellation, silero
from livekit.plugins.turn_detector.multilingual import MultilingualModel
load_dotenv(".env.local")
class Assistant(Agent):
    """Voice assistant agent for the pipeline (STT/LLM/TTS) example."""

    def __init__(self) -> None:
        # System prompt passed verbatim to the underlying LLM.
        prompt = """You are a helpful voice AI assistant.
Keep responses concise, 1-3 sentences. No markdown or emojis."""
        super().__init__(instructions=prompt)
server = AgentServer()


@server.rtc_session()
async def entrypoint(ctx: agents.JobContext):
    """Per-room job entrypoint: builds the voice pipeline, joins the room,
    and has the agent greet the participant."""

    def _select_noise_filter(params):
        # SIP (telephone) callers get the telephony-tuned model; all other
        # participants get the standard BVC model.
        if params.participant.kind == rtc.ParticipantKind.PARTICIPANT_KIND_SIP:
            return noise_cancellation.BVCTelephony()
        return noise_cancellation.BVC()

    # STT -> LLM -> TTS pipeline with Silero VAD and multilingual turn detection.
    voice_session = AgentSession(
        stt="assemblyai/universal-streaming:en",
        llm="openai/gpt-4.1-mini",
        tts="cartesia/sonic-3:9626c31c-bec5-4cca-baa8-f8ba9e84c8bc",
        vad=silero.VAD.load(),
        turn_detection=MultilingualModel(),
    )

    await voice_session.start(
        room=ctx.room,
        agent=Assistant(),
        room_options=room_io.RoomOptions(
            audio_input=room_io.AudioInputOptions(
                noise_cancellation=_select_noise_filter,
            ),
        ),
    )

    # Kick off the conversation with an initial agent turn.
    await voice_session.generate_reply(
        instructions="Greet the user and offer your assistance."
    )
if __name__ == "__main__":
    agents.cli.run_app(server)

## Example: realtime (speech-to-speech) agent

from dotenv import load_dotenv
from livekit import agents, rtc
from livekit.agents import AgentSession, Agent, AgentServer, room_io
from livekit.plugins import openai, noise_cancellation
load_dotenv(".env.local")
class Assistant(Agent):
    """Voice assistant agent for the realtime (speech-to-speech) example."""

    def __init__(self) -> None:
        # System prompt passed verbatim to the realtime model.
        super().__init__(
            instructions="You are a helpful voice AI assistant."
        )
server = AgentServer()


@server.rtc_session()
async def entrypoint(ctx: agents.JobContext):
    """Per-room job entrypoint using OpenAI's realtime speech-to-speech model
    instead of a separate STT/LLM/TTS pipeline."""

    def _select_noise_filter(params):
        # Telephony-tuned cancellation for SIP callers, standard BVC otherwise.
        if params.participant.kind == rtc.ParticipantKind.PARTICIPANT_KIND_SIP:
            return noise_cancellation.BVCTelephony()
        return noise_cancellation.BVC()

    realtime_session = AgentSession(
        llm=openai.realtime.RealtimeModel(voice="coral")
    )

    await realtime_session.start(
        room=ctx.room,
        agent=Assistant(),
        room_options=room_io.RoomOptions(
            audio_input=room_io.AudioInputOptions(
                noise_cancellation=_select_noise_filter,
            ),
        ),
    )

    # Kick off the conversation with an initial agent turn.
    await realtime_session.generate_reply(
        instructions="Greet the user and offer your assistance."
    )
if __name__ == "__main__":
    agents.cli.run_app(server)

## The `Agent` class

from livekit.agents import Agent, function_tool
class MyAgent(Agent):
    """Template agent demonstrating the lifecycle hooks and a function tool."""

    def __init__(self) -> None:
        super().__init__(
            instructions="Your system prompt here",
        )

    async def on_enter(self) -> None:
        """Called when agent becomes active."""
        # Open the conversation as soon as this agent takes over the session.
        await self.session.generate_reply(instructions="Greet the user")

    async def on_exit(self) -> None:
        """Called before agent hands off to another agent."""

    @function_tool()
    async def my_tool(self, param: str) -> str:
        """Tool description for the LLM."""
        # NOTE: the docstring above is the description exposed to the LLM.
        return f"Result: {param}"
## AgentSession

    session = AgentSession(
        stt="assemblyai/universal-streaming:en",
        llm="openai/gpt-4.1-mini",
        tts="cartesia/sonic-3:voice_id",
        vad=silero.VAD.load(),
        turn_detection=MultilingualModel(),
    )

Key session methods: `session.start(room, agent)`, `session.say(text)`,
`session.generate_reply(instructions)`, `session.interrupt()`,
`session.update_agent(new_agent)`.

## Function tools (`@function_tool`)

from livekit.agents import function_tool, RunContext
@function_tool()
async def get_weather(self, context: RunContext, location: str) -> str:
    """Get the current weather for a location."""
    # NOTE: the docstring above is the tool description exposed to the LLM.
    weather_report = f"Weather in {location}: Sunny, 72°F"
    return weather_report
## Running the agent

    # Development mode with auto-reload
    uv run agent.py dev
    # Console mode (local testing)
    uv run agent.py console
    # Production mode
    uv run agent.py start
    # Download required model files
    uv run agent.py download-files

## Model reference

STT: `"assemblyai/universal-streaming:en"`, `"deepgram/nova-3:en"`, `"cartesia/ink"`
LLM: `"openai/gpt-4.1-mini"`, `"openai/gpt-4.1"`, `"openai/gpt-5"`, `"gemini/gemini-3-flash"`, `"gemini/gemini-2.5-flash"`
TTS: `"cartesia/sonic-3:{voice_id}"`, `"elevenlabs/eleven_turbo_v2_5:{voice_id}"`, `"deepgram/aura:{voice}"`

Load LiveKit credentials into your env file with `lk app env -w`.