# develop-ai-functions-example
Develop examples for AI SDK functions. Use when creating, running, or modifying examples under examples/ai-functions/src to validate provider support, demonstrate features, or create test fixtures.
Install: `npx skill4agent add vercel/ai develop-ai-functions-example`

## Directory structure

Examples live in the `examples/ai-functions` package, under `examples/ai-functions/src/`:

| Directory | Purpose |
|---|---|
| `generate-text/` | Non-streaming text generation with `generateText` |
| `stream-text/` | Streaming text generation with `streamText` |
| `generate-object/` | Structured output generation with `generateObject` |
| `stream-object/` | Streaming structured output with `streamObject` |
| |
| `embed/` | Single embedding generation with `embed` |
| `embed-many/` | Batch embedding generation with `embedMany` |
| `generate-image/` | Image generation with `generateImage` |
| `generate-speech/` | Text-to-speech with `generateSpeech` |
| `transcribe/` | Audio transcription with `transcribe` |
| `rerank/` | Document reranking with `rerank` |
| `middleware/` | Custom middleware implementations |
| `registry/` | Provider registry setup and usage |
| `telemetry/` | OpenTelemetry integration |
| `complex/` | Multi-component examples (agents, routers) |
| `lib/` | Shared utilities (not examples) |
| `tools/` | Reusable tool definitions |
## File naming

Example files follow the `{provider}-{feature}.ts` naming convention:

| Pattern | Example | Description |
|---|---|---|
| `{provider}.ts` | `openai.ts` | Basic provider usage |
| `{provider}-{feature}.ts` | `openai-tool-call.ts` | Specific feature |
| `{provider}-{subprovider}.ts` | `google-vertex.ts` | Provider with sub-provider |
| `{provider}-{subprovider}-{feature}.ts` | `google-vertex-tool-call.ts` | Sub-provider with feature |
## Example templates

Wrap each example in `run()` from `lib/run.ts`, which handles errors and loads `.env`.

### Basic text generation

import { providerName } from '@ai-sdk/provider-name';
import { generateText } from 'ai';
import { run } from '../lib/run';
run(async () => {
const result = await generateText({
model: providerName('model-id'),
prompt: 'Your prompt here.',
});
console.log(result.text);
console.log('Token usage:', result.usage);
console.log('Finish reason:', result.finishReason);
});

### Streaming text generation

import { providerName } from '@ai-sdk/provider-name';
import { streamText } from 'ai';
import { printFullStream } from '../lib/print-full-stream';
import { run } from '../lib/run';
run(async () => {
const result = streamText({
model: providerName('model-id'),
prompt: 'Your prompt here.',
});
await printFullStream({ result });
});

### Tool calling

import { providerName } from '@ai-sdk/provider-name';
import { generateText, tool } from 'ai';
import { z } from 'zod';
import { run } from '../lib/run';
run(async () => {
const result = await generateText({
model: providerName('model-id'),
tools: {
myTool: tool({
description: 'Tool description',
inputSchema: z.object({
param: z.string().describe('Parameter description'),
}),
execute: async ({ param }) => {
return { result: `Processed: ${param}` };
},
}),
},
prompt: 'Use the tool to...',
});
console.log(JSON.stringify(result, null, 2));
});

### Structured output generation

import { providerName } from '@ai-sdk/provider-name';
import { generateObject } from 'ai';
import { z } from 'zod';
import { run } from '../lib/run';
run(async () => {
const result = await generateObject({
model: providerName('model-id'),
schema: z.object({
name: z.string(),
items: z.array(z.string()),
}),
prompt: 'Generate a...',
});
console.log(JSON.stringify(result.object, null, 2));
console.log('Token usage:', result.usage);
});

## Running examples

Run from the `examples/ai-functions` directory:

pnpm tsx src/generate-text/openai.ts
pnpm tsx src/stream-text/openai-tool-call.ts
pnpm tsx src/agent/openai-generate.ts

Choose the function that matches what the example demonstrates (`generateText`, `streamText`, `generateObject`); pass provider-specific settings via `providerOptions`. Use the `capture-api-response-test-fixture` helper when creating test fixtures.

## Shared utilities (`lib/`)

| File | Purpose |
|---|---|
| `run.ts` | Error-handling wrapper with `.env` loading |
| `print.ts` | Clean object printing (removes undefined values) |
| `print-full-stream.ts` | Colored streaming output for tool calls, reasoning, text |
| `capture-api-response-test-fixture.ts` | Save streaming chunks for test fixtures |
| `display-image.ts` | Display images in terminal |
| `save-audio.ts` | Save audio files to disk |
### Using `print`

import { print } from '../lib/print';
// Pretty print objects without undefined values
print('Result:', result);
print('Usage:', result.usage, { depth: 2 });

### Using `printFullStream`

import { printFullStream } from '../lib/print-full-stream';
const result = streamText({ ... });
await printFullStream({ result }); // Colored output for text, tool calls, reasoning

## Reusable tools (`tools/`)

import { weatherTool } from '../tools/weather-tool';
const result = await generateText({
model: openai('gpt-4o'),
tools: { weather: weatherTool },
prompt: 'What is the weather in San Francisco?',
});

Always wrap example entry points in `run()`, and place shared tool definitions such as `weatherTool` under `tools/` so multiple examples can reuse them.