# Adding a New LLM in Dust

Step-by-step guide for adding support for a new LLM in Dust. Use when adding a new model, or updating a previous one.

Install the skill:

```
npx skill4agent add dust-tt/dust dust-llm
```

## Files to update

| File | Purpose |
|---|---|
| `front/types/assistant/models/openai.ts` (provider-specific) | Model ID + configuration |
| `front/lib/api/assistant/token_pricing.ts` | Pricing per million tokens |
| `front/types/assistant/models/models.ts` | Central registry |
| `front/lib/api/llm/clients/openai/types.ts` (provider-specific) | Router whitelist |
| `sdks/js/src/types.ts` | SDK types |
| `front/components/providers/types.ts` | UI availability (optional) |
| `front/lib/api/llm/tests/llm.test.ts` | Integration tests |
## Example: adding `gpt-4-turbo-2024-04-09`

### 1. Model ID + configuration — `front/types/assistant/models/openai.ts`

```ts
export const GPT_4_TURBO_2024_04_09_MODEL_ID = "gpt-4-turbo-2024-04-09" as const;
export const GPT_4_TURBO_2024_04_09_MODEL_CONFIG: ModelConfigurationType = {
providerId: "openai",
modelId: GPT_4_TURBO_2024_04_09_MODEL_ID,
displayName: "GPT 4 turbo",
contextSize: 128_000,
recommendedTopK: 32,
recommendedExhaustiveTopK: 64,
largeModel: true,
description: "OpenAI's GPT 4 Turbo model for complex tasks (128k context).",
shortDescription: "OpenAI's second best model.",
isLegacy: false,
isLatest: false,
generationTokensCount: 2048,
supportsVision: true,
minimumReasoningEffort: "none",
maximumReasoningEffort: "none",
defaultReasoningEffort: "none",
supportsResponseFormat: false,
tokenizer: { type: "tiktoken", base: "cl100k_base" },
};
```

### 2. Pricing — `front/lib/api/assistant/token_pricing.ts`

```ts
const CURRENT_MODEL_PRICING: Record<BaseModelIdType, PricingEntry> = {
// ... existing
"gpt-4-turbo-2024-04-09": {
input: 10.0, // USD per million input tokens
output: 30.0, // USD per million output tokens
cache_read_input_tokens: 1.0, // Optional: cached reads
cache_creation_input_tokens: 12.5, // Optional: cache creation
},
};
```

### 3. Central registry — `front/types/assistant/models/models.ts`

```ts
export const MODEL_IDS = [
// ... existing
GPT_4_TURBO_2024_04_09_MODEL_ID,
] as const;
export const SUPPORTED_MODEL_CONFIGS: ModelConfigurationType[] = [
// ... existing
GPT_4_TURBO_2024_04_09_MODEL_CONFIG,
];
```

### 4. Router whitelist — `front/lib/api/llm/clients/openai/types.ts`

```ts
export const OPENAI_WHITELISTED_MODEL_IDS = [
// ... existing
GPT_4_TURBO_2024_04_09_MODEL_ID,
] as const;
```

### 5. SDK types — `sdks/js/src/types.ts`

```ts
const ModelLLMIdSchema = FlexibleEnumSchema<
// ... existing
| "gpt-4-turbo-2024-04-09"
>();
```

### 6. UI availability (optional) — `front/components/providers/types.ts`

```ts
export const USED_MODEL_CONFIGS: readonly ModelConfig[] = [
// ... existing
GPT_4_TURBO_2024_04_09_MODEL_CONFIG,
] as const;
```

### 7. Integration tests — `front/lib/api/llm/tests/llm.test.ts`

```ts
const MODELS = {
// ... existing
[GPT_4_TURBO_2024_04_09_MODEL_ID]: {
runTest: true, // Enable for testing
providerId: "openai",
},
};
```

Run the integration tests:

```
RUN_LLM_TEST=true npx vitest --config lib/api/llm/tests/vite.config.js lib/api/llm/tests/llm.test.ts --run
```

Set `runTest: false` again once the test passes, before committing.

## Updating an existing model

When updating a previous model, edit the corresponding provider files instead — e.g. for Anthropic:

- `front/types/assistant/models/anthropic.ts` (the `CLAUDE_X_MODEL_ID` constants)
- `front/lib/api/llm/clients/anthropic/types.ts` (`ANTHROPIC_WHITELISTED_MODEL_IDS`)
- `front/types/assistant/models/models.ts`
- `front/lib/api/assistant/token_pricing.ts`
- `sdks/js/src/types.ts`

## Key configuration properties

| Property | Description |
|---|---|
| `supportsVision` | Can process images |
| `supportsResponseFormat` | Supports structured output (JSON) |
| `minimumReasoningEffort` | Min reasoning level ("none", "low", "medium", "high") |
| `maximumReasoningEffort` | Max reasoning level |
| `defaultReasoningEffort` | Default reasoning level |
| `tokenizer` | Tokenizer config for token counting |
## Checklist

- Model ID added to `MODEL_IDS` and config added to `SUPPORTED_MODEL_CONFIGS` in `front/types/assistant/models/models.ts`
- Config added to `USED_MODEL_CONFIGS` in `front/components/providers/types.ts`
- Provider model file updated (`front/types/assistant/models/openai.ts`, `anthropic.ts`, etc.)