# Vercel AI SDK v5 Implementation Guide
Guide for Vercel AI SDK v5 implementation patterns including generateText, streamText, useChat hook, tool calling, embeddings, and MCP integration. Use when implementing AI chat interfaces, streaming responses, tool/function calling, text embeddings, or working with convertToModelMessages and toUIMessageStreamResponse. Activates for AI SDK integration, useChat hook usage, message streaming, or tool calling tasks.
npx skill4agent add wsimmonds/claude-nextjs-skills vercel-ai-sdk

<critical>
NEVER accept "Module not found" errors as environment issues
YOU must install the required packages with the CORRECT package manager
Common packages needed:
- ai (core AI SDK)
- @ai-sdk/openai (OpenAI provider)
- @ai-sdk/anthropic (Anthropic provider)
- @modelcontextprotocol/sdk (MCP integration)
- zod (for tool schemas)
</critical>

<critical>
"Code is correct" is NOT enough
You must achieve FULL PASSING status
This is what it means to be an autonomous agent
</critical>

ls -la | grep -E "lock"
# Look for: pnpm-lock.yaml, package-lock.json, yarn.lock, bun.lockb

Error: Cannot find module '@ai-sdk/openai'
Import: import { openai } from '@ai-sdk/openai'
Package needed: @ai-sdk/openai

# If pnpm-lock.yaml exists (MOST COMMON for Next.js evals):
pnpm install @ai-sdk/openai
# or
pnpm add @ai-sdk/openai
# If package-lock.json exists:
npm install @ai-sdk/openai
# If yarn.lock exists:
yarn add @ai-sdk/openai
# If bun.lockb exists:
bun install @ai-sdk/openai

npm run build
# or pnpm run build, yarn build, bun run build

npm run build
npm run lint
npm run test

## Tool calling: always use the `tool()` helper

// DO NOT DO THIS - This pattern is INCORRECT
import { z } from 'zod';
tools: {
myTool: {
description: 'My tool',
parameters: z.object({...}), // ❌ WRONG - "parameters" doesn't exist in v5
execute: async ({...}) => {...},
}
}

Type '{ description: string; parameters: ... }' is not assignable to type '{ inputSchema: FlexibleSchema<any>; ... }'

// ALWAYS DO THIS - This is the ONLY correct pattern
import { tool } from 'ai'; // ⚠️ MUST import tool
import { z } from 'zod';
tools: {
myTool: tool({ // ⚠️ MUST wrap with tool()
description: 'My tool',
inputSchema: z.object({...}), // ⚠️ MUST use "inputSchema" (not "parameters")
execute: async ({...}) => {...},
}),
}

Key points: import the helper with `import { tool } from 'ai';`, wrap every tool in `tool({ ... })`, use `inputSchema` (not `parameters`) with `z.object({ ... })`, and provide both `execute` and `description`.

// ❌ WRONG - v4 pattern
const { messages, input, setInput, append } = useChat();
// Sending message
append({ content: text, role: 'user' });

// ✅ CORRECT - v5 pattern
const { messages, sendMessage } = useChat();
const [input, setInput] = useState('');
// Sending message
sendMessage({ text: input });

// ❌ WRONG - v4 message rendering
<div>{message.content}</div>

// ✅ CORRECT - v5 parts-based rendering
<div>
{message.parts.map((part, index) =>
part.type === 'text' ? <span key={index}>{part.text}</span> : null
)}
</div>

import { generateText } from 'ai';
const result = await generateText({
model: 'openai/gpt-4o', // String format
prompt: 'Hello',
});

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';
const result = await generateText({
model: openai('gpt-4o'), // Function format
prompt: 'Hello',
});import { generateText } from 'ai';
const result = await generateText({
model: 'openai/gpt-4o', // String format: 'provider/model-id'
prompt: 'Your prompt here', // User input
system: 'Optional system message', // Optional system instructions
tools?: { ... }, // Optional tool calling
maxSteps?: 5, // For multi-step tool calling
});{
text: string; // Generated text output
toolCalls: ToolCall[]; // Tool invocations made
finishReason: string; // Why generation stopped
usage: TokenUsage; // Token consumption
response: RawResponse; // Raw provider response
warnings: Warning[]; // Provider-specific alerts
}// app/api/generate/route.ts
import { generateText } from 'ai';
export async function GET() {
const result = await generateText({
model: 'anthropic/claude-4-sonnet',
prompt: 'Why is the sky blue?',
});
return Response.json({ text: result.text });
}

import { streamText } from 'ai';
const result = streamText({
model: 'openai/gpt-4o',
prompt: 'Your prompt here',
system: 'Optional system message',
messages?: ModelMessage[], // For chat history
tools?: { ... },
onFinish?: async (result) => { ... },
onError?: async (error) => { ... },
});// For chat applications with useChat hook
result.toUIMessageStreamResponse();
// For simple text streaming
result.toTextStreamResponse();

// app/api/chat/route.ts
import { streamText, convertToModelMessages } from 'ai';
import type { UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: 'openai/gpt-4o',
system: 'You are a helpful assistant.',
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse();
}

import { useChat } from '@ai-sdk/react';
const {
messages, // Array of UIMessage with parts-based structure
sendMessage, // Function to send messages (replaces append)
status, // 'submitted' | 'streaming' | 'ready' | 'error'
stop, // Abort current streaming
regenerate, // Reprocess last message
setMessages, // Manually modify history
error, // Error object if request fails
reload, // Retry after error
} = useChat({
api: '/api/chat', // API endpoint
onFinish?: (message) => { ... },
onError?: (error) => { ... },
});

'use client';
import { useChat } from '@ai-sdk/react';
import { useState } from 'react';
export default function ChatPage() {
const { messages, sendMessage, status } = useChat();
const [input, setInput] = useState('');
const handleSubmit = (e: React.FormEvent) => {
e.preventDefault();
if (!input.trim()) return;
sendMessage({ text: input });
setInput('');
};
return (
<div>
<div>
{messages.map((message) => (
<div key={message.id}>
<strong>{message.role}:</strong>
{message.parts.map((part, index) =>
part.type === 'text' ? (
<span key={index}>{part.text}</span>
) : null
)}
</div>
))}
</div>
<form onSubmit={handleSubmit}>
<input
value={input}
onChange={(e) => setInput(e.target.value)}
placeholder="Type a message..."
disabled={status === 'streaming'}
/>
<button type="submit" disabled={status === 'streaming'}>
Send
</button>
</form>
</div>
);
}

import { tool } from 'ai';
import { z } from 'zod';
const weatherTool = tool({
description: 'Get the weather in a location',
inputSchema: z.object({
location: z.string().describe('The location to get the weather for'),
unit: z.enum(['C', 'F']).describe('Temperature unit'),
}),
execute: async ({ location, unit }) => {
// Fetch or mock weather data
return {
location,
temperature: 24,
unit,
condition: 'Sunny',
};
},
});// app/api/chat/route.ts
import { streamText, convertToModelMessages, tool } from 'ai';
import { z } from 'zod';
import type { UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: 'openai/gpt-4o',
messages: convertToModelMessages(messages),
tools: {
getWeather: tool({
description: 'Get the weather for a location',
inputSchema: z.object({
city: z.string().describe('The city to get the weather for'),
unit: z.enum(['C', 'F']).describe('The unit to display the temperature in'),
}),
execute: async ({ city, unit }) => {
// Mock response
return `It is currently 24°${unit} and Sunny in ${city}!`;
},
}),
},
});
return result.toUIMessageStreamResponse();
}const result = await generateText({
model: 'openai/gpt-4o',
tools: {
weather: weatherTool,
search: searchTool,
},
prompt: 'What is the weather in San Francisco and find hotels there?',
maxSteps: 5, // Allow up to 5 tool call steps
});import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';
const result = await embed({
model: openai.textEmbeddingModel('text-embedding-3-small'),
value: 'Text to embed',
});{
embedding: number[]; // Numerical array representing the text
usage: { tokens: number }; // Token consumption
response: RawResponse; // Raw provider response
}// app/api/embed/route.ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';
export async function GET() {
const { embedding, usage } = await embed({
model: openai.textEmbeddingModel('text-embedding-3-small'),
value: 'sunny day at the beach',
});
return Response.json({ embedding, usage });
}import { embedMany } from 'ai';
const { embeddings, usage } = await embedMany({
model: openai.textEmbeddingModel('text-embedding-3-small'),
values: [
'sunny day at the beach',
'rainy afternoon in the city',
'snowy mountain landscape',
],
});

Convert `useChat` UI messages to `ModelMessage` format on the server:

import { convertToModelMessages } from 'ai';
import type { UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: 'openai/gpt-4o',
messages: convertToModelMessages(messages), // Convert for model
});
return result.toUIMessageStreamResponse();
}// app/api/chat/route.ts
import { experimental_createMCPClient, streamText } from 'ai';
import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js';
export async function POST(req: Request) {
const { prompt }: { prompt: string } = await req.json();
try {
// Connect to MCP server
const httpTransport = new StreamableHTTPClientTransport(
new URL('http://localhost:3000/mcp')
);
const httpClient = await experimental_createMCPClient({
transport: httpTransport,
});
// Fetch tools from MCP server
const tools = await httpClient.tools();
const response = streamText({
model: 'openai/gpt-4o',
tools,
prompt,
onFinish: async () => {
await httpClient.close(); // Clean up
},
onError: async () => {
await httpClient.close(); // Clean up on error
},
});
return response.toTextStreamResponse();
} catch (error) {
return new Response('Internal Server Error', { status: 500 });
}
}

Key APIs: `experimental_createMCPClient`, cleanup in the `onFinish`/`onError` callbacks, `httpClient.tools()` for tool discovery, and the `@modelcontextprotocol/sdk` package for transports.

// Format: 'provider/model-id'
model: 'openai/gpt-4o'
model: 'anthropic/claude-4-sonnet'
model: 'google/gemini-2.0-flash'import { openai } from '@ai-sdk/openai';
import { anthropic } from '@ai-sdk/anthropic';
model: openai('gpt-4o')
model: anthropic('claude-4-sonnet')import { openai } from '@ai-sdk/openai';
// Text embeddings use a different method
openai.textEmbeddingModel('text-embedding-3-small')
openai.textEmbeddingModel('text-embedding-3-large')import type {
UIMessage, // Message type from useChat
ModelMessage, // Message type for model functions
ToolCall, // Tool call information
TokenUsage, // Token consumption data
} from 'ai';import { tool } from 'ai';
import { z } from 'zod';
// Tool helper infers execute parameter types
const myTool = tool({
description: 'My tool',
inputSchema: z.object({
param1: z.string(),
param2: z.number(),
}),
execute: async ({ param1, param2 }) => {
// param1 is inferred as string
// param2 is inferred as number
return { result: 'success' };
},
});// app/api/chat/route.ts
import type { UIMessage } from 'ai';
export async function POST(req: Request): Promise<Response> {
const { messages }: { messages: UIMessage[] } = await req.json();
// ... implementation
}

// app/page.tsx
'use client';
import { useChat } from '@ai-sdk/react';
import { useState } from 'react';
export default function Chat() {
const { messages, sendMessage, status } = useChat();
const [input, setInput] = useState('');
return (
<div>
{messages.map((m) => (
<div key={m.id}>
<strong>{m.role}:</strong>
{m.parts.map((part, i) =>
part.type === 'text' ? <span key={i}>{part.text}</span> : null
)}
</div>
))}
<form onSubmit={(e) => {
e.preventDefault();
sendMessage({ text: input });
setInput('');
}}>
<input value={input} onChange={(e) => setInput(e.target.value)} />
<button disabled={status === 'streaming'}>Send</button>
</form>
</div>
);
}app/api/chat/route.tsimport { streamText, convertToModelMessages } from 'ai';
import type { UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: 'openai/gpt-4o',
system: 'You are a helpful assistant.',
messages: convertToModelMessages(messages),
});
return result.toUIMessageStreamResponse();
}import { streamText, convertToModelMessages, tool } from 'ai';
import { z } from 'zod';
import type { UIMessage } from 'ai';
export async function POST(req: Request) {
const { messages }: { messages: UIMessage[] } = await req.json();
const result = streamText({
model: 'openai/gpt-4o',
messages: convertToModelMessages(messages),
tools: {
getWeather: tool({
description: 'Get weather for a city',
inputSchema: z.object({
city: z.string(),
}),
execute: async ({ city }) => {
// API call or mock data
return { city, temp: 72, condition: 'Sunny' };
},
}),
searchWeb: tool({
description: 'Search the web',
inputSchema: z.object({
query: z.string(),
}),
execute: async ({ query }) => {
// Search implementation
return { results: ['...'] };
},
}),
},
});
return result.toUIMessageStreamResponse();
}// app/api/summarize/route.ts
import { generateText } from 'ai';
export async function POST(req: Request) {
const { text } = await req.json();
const result = await generateText({
model: 'anthropic/claude-4-sonnet',
system: 'You are a summarization expert.',
prompt: `Summarize this text:\n\n${text}`,
});
return Response.json({ summary: result.text });
}// app/api/search/route.ts
import { embed } from 'ai';
import { openai } from '@ai-sdk/openai';
export async function POST(req: Request) {
const { query } = await req.json();
// Generate embedding for search query
const { embedding } = await embed({
model: openai.textEmbeddingModel('text-embedding-3-small'),
value: query,
});
// Use embedding for similarity search in vector database
// const results = await vectorDB.search(embedding);
return Response.json({ embedding, results: [] });
}tool()// ❌ WRONG - Plain object (WILL CAUSE BUILD FAILURE)
import { z } from 'zod';
tools: {
myTool: {
description: 'My tool',
parameters: z.object({ // ❌ Wrong property name
city: z.string(),
}),
execute: async ({ city }) => { ... },
},
}
// Build error: Type '{ description: string; parameters: ... }' is not assignable
// ✅ CORRECT - Use tool() helper (REQUIRED)
import { tool } from 'ai'; // ⚠️ MUST import tool
import { z } from 'zod';
tools: {
myTool: tool({ // ⚠️ MUST use tool() wrapper
description: 'My tool',
inputSchema: z.object({ // ⚠️ Use inputSchema (not parameters)
city: z.string(),
}),
execute: async ({ city }) => { ... },
}),
}// ❌ WRONG - v4 pattern
const { input, setInput, append } = useChat();
append({ content: 'Hello', role: 'user' });
// ✅ CORRECT - v5 pattern
const { sendMessage } = useChat();
const [input, setInput] = useState('');
sendMessage({ text: 'Hello' });// ❌ WRONG - v4 pattern
<div>{message.content}</div>
// ✅ CORRECT - v5 parts-based
<div>
{message.parts.map((part, i) =>
part.type === 'text' ? <span key={i}>{part.text}</span> : null
)}
</div>// ❌ WRONG - passing UIMessages directly
const result = streamText({
model: 'openai/gpt-4o',
messages: messages, // UIMessage[] - type error
});
// ✅ CORRECT - convert to ModelMessage[]
const result = streamText({
model: 'openai/gpt-4o',
messages: convertToModelMessages(messages),
});// ❌ WRONG - no cleanup
const httpClient = await experimental_createMCPClient({
transport: httpTransport,
});
const tools = await httpClient.tools();
const response = streamText({ model, tools, prompt });
return response.toTextStreamResponse();
// ✅ CORRECT - cleanup in callbacks
const response = streamText({
model,
tools,
prompt,
onFinish: async () => {
await httpClient.close();
},
onError: async () => {
await httpClient.close();
},
});// ❌ WRONG - using text stream for useChat
return result.toTextStreamResponse(); // Won't work with useChat hook
// ✅ CORRECT - use UI message stream for useChat
return result.toUIMessageStreamResponse();
// ✅ ALSO CORRECT - text stream for non-chat scenarios
// For simple text streaming (not using useChat hook)
return result.toTextStreamResponse();// ❌ WRONG - using regular model method
const { embedding } = await embed({
model: openai('text-embedding-3-small'), // Wrong method
value: 'text',
});
// ✅ CORRECT - use textEmbeddingModel
const { embedding } = await embed({
model: openai.textEmbeddingModel('text-embedding-3-small'),
value: 'text',
});

Quick reference:

| Task | Function | Key Parameters |
|---|---|---|
| Generate text | `generateText` | `model`, `prompt`, `system` |
| Stream text | `streamText` | `model`, `messages`, `tools`, `onFinish` |
| Chat UI | `useChat` | `messages`, `sendMessage`, `status` |
| Tool calling | `tool()` | `description`, `inputSchema`, `execute` |
| Text embedding | `embed` | `model`, `value` |
| Batch embedding | `embedMany` | `model`, `values` |
| Message conversion | `convertToModelMessages` | `UIMessage[]` input |
| MCP integration | `experimental_createMCPClient` | `transport` |
Remember: use `sendMessage` (not `append`) and always pass messages through `convertToModelMessages` before calling model functions.