Loading...
Loading...
Compare original and translation side by side
message()json()lastRawnpm install modelmixnpm install modelmiximport { ModelMix } from 'modelmix';import { ModelMix } from 'modelmix';// Static factory (preferred)
const model = ModelMix.new();
// With global options
const model = ModelMix.new({
options: { max_tokens: 4096, temperature: 0.7 },
config: {
system: "You are a helpful assistant.",
max_history: 5,
debug: 0, // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose
roundRobin: false // false=fallback, true=rotate models
}
});// 静态工厂方法(推荐)
const model = ModelMix.new();
// 带全局配置选项
const model = ModelMix.new({
options: { max_tokens: 4096, temperature: 0.7 },
config: {
system: "你是一个乐于助人的助手。",
max_history: 5,
debug: 0, // 0=静默, 1=极简, 2=摘要, 3=完整(不截断), 4=详细
roundRobin: false // false=降级模式, true=模型轮询
}
});const model = ModelMix.new()
.sonnet46() // primary
.gpt52() // fallback 1
.gemini3flash() // fallback 2
.addText("Hello!")sonnet45gpt5minigemini3flashconst model = ModelMix.new()
.sonnet46() // 主模型
.gpt52() // 降级备选1
.gemini3flash() // 降级备选2
.addText("Hello!")sonnet45gpt5minigemini3flashgpt52gpt51gpt5gpt5minigpt5nanogpt41gpt41minigpt41nanoopus46opus45sonnet46sonnet45haiku45haiku35thinkgemini3progemini3flashgemini25progemini25flashgrok4grok41sonarsonarProscoutmaverickqwen3kimiK2deepseekR1gptOssminimaxM21deepseekV32GLM47mix.methodName(){ options, config }gpt52gpt51gpt5gpt5minigpt5nanogpt41gpt41minigpt41nanoopus46opus45sonnet46sonnet45haiku45haiku35thinkgemini3progemini3flashgemini25progemini25flashgrok4grok41sonarsonarProscoutmaverickqwen3kimiK2deepseekR1gptOssminimaxM21deepseekV32GLM47mix.methodName(){ options, config }const answer = await ModelMix.new()
.gpt5mini()
.addText("What is the capital of France?")
.message();const answer = await ModelMix.new()
.gpt5mini()
.addText("法国的首都是什么?")
.message();const result = await ModelMix.new()
.gpt5mini()
.addText("Name and capital of 3 South American countries.")
.json(
{ countries: [{ name: "", capital: "" }] }, // schema example
{ countries: [{ name: "country name", capital: "in uppercase" }] }, // descriptions
{ addNote: true } // options
);
// result.countries → [{ name: "Brazil", capital: "BRASILIA" }, ...]json()json(schemaExample, schemaDescription?, { addSchema, addExample, addNote }?)const result = await ModelMix.new()
.gpt5mini()
.addText("列出3个南美国家的名称及首都。")
.json(
{ countries: [{ name: "", capital: "" }] }, // 示例 schema
{ countries: [{ name: "国家名称", capital: "大写格式" }] }, // 字段描述
{ addNote: true } // 选项
);
// result.countries → [{ name: "Brazil", capital: "BRASILIA" }, ...]json()json(schemaExample, schemaDescription?, { addSchema, addExample, addNote }?)const result = await model.json(
{ name: 'martin', age: 22, sex: 'Male' },
{
name: { description: 'Name of the actor', required: false },
age: 'Age of the actor', // string still works
sex: { description: 'Gender', enum: ['Male', 'Female', null] }
}
);descriptionrequiredtruefalseenumnulldefaultconst result = await model.json(
{ name: 'martin', age: 22, sex: 'Male' },
{
name: { description: '演员姓名', required: false },
age: '演员年龄', // 字符串格式依然有效
sex: { description: '性别', enum: ['Male', 'Female', null] }
}
);descriptionrequiredtruefalseenumnulldefault{ out: [...] }const result = await model.json([{ name: 'martin' }]);
// result is an array: [{ name: "Martin" }, { name: "Carlos" }, ...]{ out: [...] }const result = await model.json([{ name: 'martin' }]);
// result 为数组格式: [{ name: "Martin" }, { name: "Carlos" }, ...]await ModelMix.new()
.gpt5mini()
.addText("Tell me a story.")
.stream(({ delta, message }) => {
process.stdout.write(delta);
});await ModelMix.new()
.gpt5mini()
.addText("给我讲一个故事。")
.stream(({ delta, message }) => {
process.stdout.write(delta);
});const raw = await ModelMix.new()
.sonnet45think()
.addText("Solve this step by step: 2+2*3")
.raw();
// raw.message, raw.think, raw.tokens, raw.toolCalls, raw.responseconst raw = await ModelMix.new()
.sonnet45think()
.addText("一步步解决这个问题:2+2*3")
.raw();
// raw.message, raw.think, raw.tokens, raw.toolCalls, raw.responsemessage()json()lastRawlastRawmessage()json()message()json()block()stream()lastRawraw()const model = ModelMix.new().gpt5mini().addText("Hello!");
const text = await model.message();
console.log(model.lastRaw.tokens);
// { input: 122, output: 86, total: 541, cost: 0.000319 }
console.log(model.lastRaw.think); // reasoning content (if available)
console.log(model.lastRaw.response); // raw API responsemessage()json()block()stream()lastRawraw()const model = ModelMix.new().gpt5mini().addText("你好!");
const text = await model.message();
console.log(model.lastRaw.tokens);
// { input: 122, output: 86, total: 541, cost: 0.000319 }
console.log(model.lastRaw.think); // 推理内容(若可用)
console.log(model.lastRaw.response); // 原始API响应const model = ModelMix.new().sonnet45();
model.addImage('./photo.jpg'); // from file
model.addImageFromUrl('https://example.com/img.png'); // from URL
model.addText('Describe this image.');
const description = await model.message();const model = ModelMix.new().sonnet45();
model.addImage('./photo.jpg'); // 从本地文件添加
model.addImageFromUrl('https://example.com/img.png'); // 从URL添加
model.addText('描述这张图片。');
const description = await model.message();const model = ModelMix.new().gpt5mini();
model.setSystemFromFile('./prompts/system.md');
model.addTextFromFile('./prompts/task.md');
model.replace({
'{role}': 'data analyst',
'{language}': 'Spanish'
});
model.replaceKeyFromFile('{code}', './src/utils.js');
console.log(await model.message());const model = ModelMix.new().gpt5mini();
model.setSystemFromFile('./prompts/system.md');
model.addTextFromFile('./prompts/task.md');
model.replace({
'{role}': '数据分析师',
'{language}': '西班牙语'
});
model.replaceKeyFromFile('{code}', './src/utils.js');
console.log(await model.message());const pool = ModelMix.new({ config: { roundRobin: true } })
.gpt5mini()
.sonnet45()
.gemini3flash();
// Each call rotates to the next model
const r1 = await pool.new().addText("Request 1").message();
const r2 = await pool.new().addText("Request 2").message();const pool = ModelMix.new({ config: { roundRobin: true } })
.gpt5mini()
.sonnet45()
.gemini3flash();
// 每次调用会切换到下一个模型
const r1 = await pool.new().addText("请求1").message();
const r2 = await pool.new().addText("请求2").message();const model = ModelMix.new({ config: { max_history: 10 } }).gpt5nano();
model.setSystem('You are an assistant. Today is ' + new Date().toISOString());
await model.addMCP('@modelcontextprotocol/server-brave-search');
model.addText('Use Internet: What is the latest news about AI?');
console.log(await model.message());BRAVE_API_KEY.envconst model = ModelMix.new({ config: { max_history: 10 } }).gpt5nano();
model.setSystem('你是一个助手。今天的日期是 ' + new Date().toISOString());
await model.addMCP('@modelcontextprotocol/server-brave-search');
model.addText('使用互联网查询:AI领域的最新资讯是什么?');
console.log(await model.message());.envBRAVE_API_KEYconst model = ModelMix.new({ config: { max_history: 10 } }).gpt5mini();
model.addTool({
name: "get_weather",
description: "Get weather for a city",
inputSchema: {
type: "object",
properties: { city: { type: "string" } },
required: ["city"]
}
}, async ({ city }) => {
return `The weather in ${city} is sunny, 25C`;
});
model.addText("What's the weather in Tokyo?");
console.log(await model.message());const model = ModelMix.new({ config: { max_history: 10 } }).gpt5mini();
model.addTool({
name: "get_weather",
description: "获取城市天气",
inputSchema: {
type: "object",
properties: { city: { type: "string" } },
required: ["city"]
}
}, async ({ city }) => {
return `${city}的天气是晴天,气温25摄氏度`;
});
model.addText("东京的天气怎么样?");
console.log(await model.message());const model = ModelMix.new({
config: {
bottleneck: {
maxConcurrent: 4,
minTime: 1000
}
}
}).gpt5mini();const model = ModelMix.new({
config: {
bottleneck: {
maxConcurrent: 4,
minTime: 1000
}
}
}).gpt5mini();const model = ModelMix.new({
config: { debug: 2 } // 0=silent, 1=minimal, 2=summary, 3=full (no truncate), 4=verbose
}).gpt5mini();DEBUG=ModelMix* node script.jsconst model = ModelMix.new({
config: { debug: 2 } // 0=静默, 1=极简, 2=摘要, 3=完整(不截断), 4=详细
}).gpt5mini();DEBUG=ModelMix* node script.js// These use providers with free quotas (OpenRouter, Groq, Cerebras)
const model = ModelMix.new()
.gptOss()
.kimiK2()
.deepseekR1()
.hermes3()
.addText("What is the capital of France?");
console.log(await model.message());// 这些模型使用提供免费额度的提供商(OpenRouter、Groq、Cerebras)
const model = ModelMix.new()
.gptOss()
.kimiK2()
.deepseekR1()
.hermes3()
.addText("法国的首都是什么?");
console.log(await model.message());const chat = ModelMix.new({ config: { max_history: 10 } }).gpt5mini();
chat.addText("My name is Martin.");
await chat.message();
chat.addText("What's my name?");
const reply = await chat.message(); // "Martin"const chat = ModelMix.new({ config: { max_history: 10 } }).gpt5mini();
chat.addText("我的名字是Martin。");
await chat.message();
chat.addText("我叫什么名字?");
const reply = await chat.message(); // "Martin"package.jsonmodelmixnpm installModelMix.new()new ModelMix().envdotenv/configprocess.loadEnvFile()addTool()max_history.json(){ description, required, enum, default }.message().raw()thinksonnet45think(){key}require{ ModelMix }MixOpenAIMixAnthropicMixGoogleMixPerplexityMixGroqMixTogetherMixGrokMixOpenRouterMixOllamaMixLMStudioMixCustomMixCerebrasMixFireworksMixMiniMaxnpm installpackage.jsonmodelmixModelMix.new()new ModelMix().envdotenv/configprocess.loadEnvFile()addTool()max_history.json(){ description, required, enum, default }.message().raw()thinksonnet45think(){key}require{ ModelMix }MixOpenAIMixAnthropicMixGoogleMixPerplexityMixGroqMixTogetherMixGrokMixOpenRouterMixOllamaMixLMStudioMixCustomMixCerebrasMixFireworksMixMiniMax| Method | Returns | Description |
|---|---|---|
| `addText(text)` | | Add user message |
| `addTextFromFile(path)` | | Add user message from file |
| `setSystem(text)` | | Set system prompt |
| `setSystemFromFile(path)` | | Set system prompt from file |
| `addImage(path)` | | Add image from file |
| `addImageFromUrl(url)` | | Add image from URL or data URI |
| `replace(map)` | | Set placeholder replacements |
| `replaceKeyFromFile(key, path)` | | Replace placeholder with file content |
| `message()` | | Get text response |
| `json(schemaExample, schemaDescription?, options?)` | | Get structured JSON. Descriptions support descriptor objects |
| `raw()` | | Full response |
| `lastRaw` | | Full response from last call |
| `stream(callback)` | | Stream response |
| `block()` | | Extract code block from response |
| `addMCP(package)` | | Add MCP server tools |
| `addTool(schema, handler)` | | Register custom local tool |
| `addTools(...)` | | Register multiple tools |
| `removeTool(name)` | | Remove a tool |
| `listTools()` | | List registered tools |
| `new()` | | Clone instance sharing models |
| `attach(provider)` | | Attach custom provider |
| 方法 | 返回值 | 描述 |
|---|---|---|
| `addText(text)` | | 添加用户消息 |
| `addTextFromFile(path)` | | 从文件添加用户消息 |
| `setSystem(text)` | | 设置系统提示词 |
| `setSystemFromFile(path)` | | 从文件设置系统提示词 |
| `addImage(path)` | | 从文件添加图片 |
| `addImageFromUrl(url)` | | 从URL或数据URI添加图片 |
| `replace(map)` | | 设置占位符替换规则 |
| `replaceKeyFromFile(key, path)` | | 用文件内容替换指定占位符 |
| `message()` | | 获取文本响应 |
| `json(schemaExample, schemaDescription?, options?)` | | 获取结构化JSON。描述支持使用描述符对象 |
| `raw()` | | 获取完整原始响应 |
| `lastRaw` | | 最近一次调用的完整原始响应 |
| `stream(callback)` | | 实现流式响应 |
| `block()` | | 从响应中提取代码块 |
| `addMCP(package)` | | 添加MCP服务工具 |
| `addTool(schema, handler)` | | 注册自定义本地工具 |
| `addTools(...)` | | 批量注册工具 |
| `removeTool(name)` | | 移除指定工具 |
| `listTools()` | | 列出已注册的工具 |
| `new()` | | 克隆实例并共享已关联的模型 |
| `attach(provider)` | | 关联自定义提供商 |