# TanStack AI
Provider-agnostic, type-safe AI SDK for streaming, tool calling, structured output, and multimodal content.
npx skill4agent add tanstack-skills/tanstack-skills tanstack-ai

# Packages: @tanstack/ai, @tanstack/ai-client, @tanstack/ai-react, @tanstack/ai-solid,
# @tanstack/ai-openai, @tanstack/ai-anthropic, @tanstack/ai-gemini, @tanstack/ai-ollama

npm install @tanstack/ai @tanstack/ai-react
# Or for framework-agnostic vanilla client:
npm install @tanstack/ai @tanstack/ai-client
# Provider adapters (install only what you need):
npm install @tanstack/ai-openai
npm install @tanstack/ai-anthropic
npm install @tanstack/ai-gemini
npm install @tanstack/ai-ollama

# PHP (Composer):
composer require tanstack/ai tanstack/ai-openai

# Python:
pip install tanstack-ai tanstack-ai-openai

import { generate } from '@tanstack/ai'
import { openaiText } from '@tanstack/ai-openai/adapters'
// Basic text generation: send a system + user message pair through the
// OpenAI adapter and stream the reply to stdout.
const result = await generate({
  adapter: openaiText({ model: 'gpt-4o' }),
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Explain React hooks in 3 sentences.' },
  ],
})

// Streaming with async iteration: chunks arrive incrementally.
for await (const chunk of result) {
  process.stdout.write(chunk.text)
}

import { openaiText } from '@tanstack/ai-openai/adapters'
import { anthropicText } from '@tanstack/ai-anthropic/adapters'
import { geminiText } from '@tanstack/ai-gemini/adapters'
import { ollamaText } from '@tanstack/ai-ollama/adapters'
// OpenAI
const openai = openaiText({ model: 'gpt-4o' })
// Anthropic
const anthropic = anthropicText({ model: 'claude-sonnet-4-20250514' })
// Google Gemini
const gemini = geminiText({ model: 'gemini-pro' })
// Ollama (local)
const ollama = ollamaText({ model: 'llama3' })
// Runtime adapter switching
const adapter = process.env.AI_PROVIDER === 'anthropic' ? anthropic : openaiimport { useChat } from '@tanstack/ai-react'
// Minimal React chat UI built on the useChat hook: renders the message
// transcript and a submit form wired to the hook's controlled input state.
function ChatUI() {
  const { messages, input, setInput, handleSubmit, isLoading } = useChat({
    adapter: openaiText({ model: 'gpt-4o' }),
  })
  return (
    <div>
      {messages.map((msg) => (
        <div key={msg.id}>
          <strong>{msg.role}:</strong> {msg.content}
        </div>
      ))}
      <form onSubmit={handleSubmit}>
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          placeholder="Type a message..."
        />
        {/* Disable sends while a response is streaming */}
        <button type="submit" disabled={isLoading}>
          Send
        </button>
      </form>
    </div>
  )
}

import { useCompletion } from '@tanstack/ai-react'
// Single-shot completion UI built on useCompletion: a prompt textarea
// plus the most recent completion text, rendered once available.
function CompletionUI() {
  const { completion, input, setInput, handleSubmit, isLoading } = useCompletion({
    adapter: openaiText({ model: 'gpt-4o' }),
  })
  return (
    <div>
      <form onSubmit={handleSubmit}>
        <textarea
          value={input}
          onChange={(e) => setInput(e.target.value)}
          placeholder="Enter prompt..."
        />
        <button type="submit" disabled={isLoading}>Generate</button>
      </form>
      {completion && <div>{completion}</div>}
    </div>
  )
}

import { createChat } from '@tanstack/ai-solid'
// Solid chat UI built on createChat: state is exposed as accessors
// (messages(), input(), isLoading()) rather than plain values, and
// <For> is used instead of Array.map for reactive list rendering.
function ChatUI() {
  const chat = createChat({
    adapter: openaiText({ model: 'gpt-4o' }),
  })
  return (
    <div>
      <For each={chat.messages()}>
        {(msg) => (
          <div>
            <strong>{msg.role}:</strong> {msg.content}
          </div>
        )}
      </For>
      <form onSubmit={chat.handleSubmit}>
        <input
          value={chat.input()}
          onInput={(e) => chat.setInput(e.target.value)}
          placeholder="Type a message..."
        />
        <button type="submit" disabled={chat.isLoading()}>
          Send
        </button>
      </form>
    </div>
  )
}

import { createAIClient } from '@tanstack/ai-client'
import { openaiText } from '@tanstack/ai-openai/adapters'
const client = createAIClient({
adapter: openaiText({ model: 'gpt-4o' }),
})
// Subscribe to state changes
client.subscribe((state) => {
console.log('Messages:', state.messages)
console.log('Loading:', state.isLoading)
})
// Send a message
await client.send('Hello, world!')
// Clear conversation
client.clear()import { generate } from '@tanstack/ai'
// Default: stream chunks as they arrive
const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: [...],
stream: true,
})
for await (const chunk of result) {
// Process each chunk
console.log(chunk.text)
}// Server-side SSE endpoint
import { createReplayStream } from '@tanstack/ai'
// Server-side SSE endpoint: replays the model's stream to the client
// as a text/event-stream Response.
export async function handler(req: Request) {
  const stream = createReplayStream({
    adapter: openaiText({ model: 'gpt-4o' }),
    messages: await req.json(), // request body is the message list
  })
  return new Response(stream, {
    headers: { 'Content-Type': 'text/event-stream' },
  })
}

import { generate } from '@tanstack/ai'
import { convertZodToJsonSchema } from '@tanstack/ai'
import { z } from 'zod'
const RecipeSchema = z.object({
name: z.string(),
ingredients: z.array(z.object({
item: z.string(),
amount: z.string(),
})),
steps: z.array(z.string()),
cookTime: z.number(),
})
const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: [{ role: 'user', content: 'Give me a pasta recipe' }],
schema: convertZodToJsonSchema(RecipeSchema),
})
// result is typed as z.infer<typeof RecipeSchema>
console.log(result.name, result.ingredients)import { generate } from '@tanstack/ai'
const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: [{ role: 'user', content: 'What is the weather in NYC?' }],
tools: {
getWeather: {
description: 'Get weather for a location',
parameters: z.object({
location: z.string(),
unit: z.enum(['celsius', 'fahrenheit']).optional(),
}),
execute: async ({ location, unit }) => {
const data = await fetchWeather(location, unit)
return data
},
},
},
})import { ToolCallManager } from '@tanstack/ai'
const manager = new ToolCallManager({
tools: {
deleteUser: {
description: 'Delete a user account',
parameters: z.object({ userId: z.string() }),
requiresApproval: true, // Requires human approval
execute: async ({ userId }) => {
await deleteUser(userId)
return { success: true }
},
},
},
onApprovalRequired: async (toolCall) => {
// Present to user for approval
return await showApprovalDialog(toolCall)
},
})const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: [{ role: 'user', content: 'Research and summarize the topic' }],
tools: { search, summarize, writeReport },
maxIterations: 10, // Limit agent loop iterations
})// Images
const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: [{
role: 'user',
content: [
{ type: 'text', text: 'What is in this image?' },
{ type: 'image_url', image_url: { url: 'https://example.com/photo.jpg' } },
],
}],
})
// Image generation with DALL-E
import { openaiImage } from '@tanstack/ai-openai/adapters'
const image = await generate({
adapter: openaiImage({ model: 'dall-e-3' }),
messages: [{ role: 'user', content: 'A sunset over mountains' }],
})
// Image generation with Gemini Imagen
import { geminiImage } from '@tanstack/ai-gemini/adapters'
const image = await generate({
adapter: geminiImage({ model: 'imagen-3' }),
messages: [{ role: 'user', content: 'A futuristic cityscape at night' }],
})import { generate } from '@tanstack/ai'
import { anthropicText } from '@tanstack/ai-anthropic/adapters'
// Extended thinking (reasoning) with Anthropic: enable a thinking token
// budget and read the reasoning output separately from the final answer.
const result = await generate({
  adapter: anthropicText({ model: 'claude-sonnet-4-20250514' }),
  messages: [{ role: 'user', content: 'Solve this complex math problem step by step...' }],
  thinking: {
    enabled: true,
    budget: 10000, // Max thinking tokens
  },
})

// Access thinking/reasoning output
console.log('Thinking:', result.thinking)
console.log('Response:', result.text)

// Streaming with thinking tokens: thinking chunks are tagged by type.
for await (const chunk of result) {
  if (chunk.type === 'thinking') {
    console.log('[Thinking]', chunk.text)
  } else {
    process.stdout.write(chunk.text)
  }
}

import { generateMessageId, normalizeToUIMessage } from '@tanstack/ai'
// Generate unique message IDs
const id = generateMessageId()
// Normalize provider-specific messages to UI format
const uiMessage = normalizeToUIMessage(providerMessage)const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: [...],
onEvent: (event) => {
// Structured, typed events
switch (event.type) {
case 'text':
console.log('Text chunk:', event.data)
break
case 'tool_call':
console.log('Tool called:', event.name)
break
case 'error':
console.error('Error:', event.error)
break
}
},
})import { TanStackDevtools } from '@tanstack/react-devtools'
import { AIDevtoolsPanel } from '@tanstack/ai-react/devtools'
// Devtools integration: register the AI panel as a TanStackDevtools plugin.
function App() {
  return (
    <TanStackDevtools
      plugins={[
        {
          id: 'ai',
          name: 'AI',
          render: () => <AIDevtoolsPanel />,
        },
      ]}
    />
  )
}

// Shared implementation between AI tools and server functions
import { createServerFn } from '@tanstack/react-start'
import { generate } from '@tanstack/ai'
const aiChat = createServerFn({ method: 'POST' })
.validator(z.object({ messages: z.array(messageSchema) }))
.handler(async ({ data }) => {
const result = await generate({
adapter: openaiText({ model: 'gpt-4o' }),
messages: data.messages,
})
return result
})import { parsePartialJson } from '@tanstack/ai'
// Parse incomplete JSON during streaming: best-effort completion of a
// truncated document so partial structured output can be rendered live.
const partial = parsePartialJson('{"name": "Pasta", "ingredients": [{"item": "flour"')
// Returns: { name: "Pasta", ingredients: [{ item: "flour" }] }