Loading...
Loading...
Bridge any AI agent backend to WeChat using the weixin-agent-sdk framework with simple Agent interface, login, and message loop.
npx skill4agent add aradotso/trending-skills weixin-agent-sdk

Skill by ara.so — Daily 2026 Skills collection.
weixin-agent-sdk — Agent

# npm
npm install weixin-agent-sdk
# pnpm (monorepo)
pnpm add weixin-agent-sdk

import { login } from "weixin-agent-sdk";
await login();
// Credentials are persisted to ~/.openclaw/ — run once, then use start()

import { login, start, type Agent } from "weixin-agent-sdk";
/** Minimal Agent: echoes every incoming message back to the sender. */
const echo: Agent = {
  chat: async (req) => {
    const prefix = "You said: ";
    return { text: prefix + req.text };
  },
};
await login();
await start(echo);Agentinterface Agent {
chat(request: ChatRequest): Promise<ChatResponse>;
}
/** Inbound message delivered to Agent.chat(). */
interface ChatRequest {
conversationId: string; // Unique user/conversation identifier
text: string; // Message text content (may be empty for media-only messages — TODO confirm)
media?: { // Present only when the message carries an attachment
type: "image" | "audio" | "video" | "file";
filePath: string; // Local path (already downloaded & decrypted by the SDK)
mimeType: string; // MIME type of the downloaded file
fileName?: string; // Original filename, when the platform provides one
};
}
interface ChatResponse {
text?: string; // Markdown supported; auto-converted to plain text
media?: {
type: "image" | "video" | "file";
url: string; // Local path OR HTTPS URL (auto-downloaded)
fileName?: string;
};
}login()~/.openclaw/start(agent)import { login, start, type Agent } from "weixin-agent-sdk";
// Per-conversation transcript, keyed by conversation id.
const conversations = new Map<string, string[]>();

/** Agent that keeps a running transcript and forwards it to an AI backend. */
const myAgent: Agent = {
  async chat(req) {
    let transcript = conversations.get(req.conversationId);
    if (transcript === undefined) {
      transcript = [];
    }
    transcript.push(`user: ${req.text}`);
    const reply = await callMyAIService(transcript);
    transcript.push(`assistant: ${reply}`);
    conversations.set(req.conversationId, transcript);
    return { text: reply };
  },
};
await login();
await start(myAgent);import OpenAI from "openai";
import { login, start, type Agent, type ChatRequest } from "weixin-agent-sdk";
import * as fs from "fs";
// OpenAI client — credentials and endpoint come from the environment.
const client = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
baseURL: process.env.OPENAI_BASE_URL, // optional override for OpenAI-compatible services
});
// Model and system prompt are environment-configurable with defaults.
const model = process.env.OPENAI_MODEL ?? "gpt-4o";
const systemPrompt = process.env.SYSTEM_PROMPT ?? "You are a helpful assistant.";
type Message = OpenAI.Chat.ChatCompletionMessageParam;
// Per-conversation message history, keyed by conversationId.
const histories = new Map<string, Message[]>();
const openaiAgent: Agent = {
  /**
   * Forward a WeChat message (text and/or image) to the OpenAI Chat
   * Completions API and return the assistant's reply.
   *
   * The stored transcript is committed only after the API call succeeds,
   * so a failed request cannot leave an orphaned user turn in the shared
   * history (the original mutated the Map's array before awaiting).
   */
  async chat(req: ChatRequest) {
    // Work on a copy; commit to `histories` only on success.
    const history = [...(histories.get(req.conversationId) ?? [])];

    // Build the user message — multimodal when an image is attached.
    const content: OpenAI.Chat.ChatCompletionContentPart[] = [];
    if (req.text) {
      content.push({ type: "text", text: req.text });
    }
    if (req.media?.type === "image") {
      // File is already downloaded & decrypted locally (see ChatRequest).
      const imageData = fs.readFileSync(req.media.filePath).toString("base64");
      content.push({
        type: "image_url",
        image_url: {
          url: `data:${req.media.mimeType};base64,${imageData}`,
        },
      });
    }
    if (content.length === 0) {
      // Audio/video/file messages with no text would otherwise send an
      // empty content array, which the API rejects.
      return { text: "Sorry, I can only handle text and image messages." };
    }
    history.push({ role: "user", content });

    const response = await client.chat.completions.create({
      model,
      messages: [{ role: "system", content: systemPrompt }, ...history],
    });

    // Guard the indexed access — choices may be empty, content may be null.
    const reply = response.choices[0]?.message?.content ?? "";
    history.push({ role: "assistant", content: reply });
    histories.set(req.conversationId, history); // commit on success only
    return { text: reply };
  },
};
await login();
await start(openaiAgent);const imageAgent: Agent = {
// Example: reply with text plus an image attachment.
async chat(req) {
return {
text: "Here is your image:",
media: {
type: "image",
url: "/tmp/output.png", // local path
// or: url: "https://example.com/image.png" — auto-downloaded
},
};
},
};const fileAgent: Agent = {
// Example: reply with a file attachment only (no text).
async chat(req) {
return {
media: {
type: "file",
url: "/tmp/report.pdf",
fileName: "monthly-report.pdf", // filename shown to the recipient
},
};
},
};weixin-acp# Claude Code
npx weixin-acp claude-code
# Codex
npx weixin-acp codex
# Any ACP-compatible agent (e.g. kimi-cli)
npx weixin-acp start -- kimi acp

weixin-acp

| Variable | Required | Description |
|---|---|---|
| OPENAI_API_KEY | Yes | OpenAI API key |
| OPENAI_BASE_URL | No | Custom API base URL (OpenAI-compatible services) |
| OPENAI_MODEL | No | Model name, default gpt-4o |
| SYSTEM_PROMPT | No | System prompt for the assistant |
| Command | Description |
|---|---|
| Echoes back directly (bypasses Agent), shows channel latency |
| Toggles debug mode — appends full latency stats to each reply |
| Type | | Notes |
|---|---|---|
| Text | — | Plain text in |
| Image | | Downloaded & decrypted, |
| Voice | | SILK auto-converted to WAV (requires |
| Video | | Downloaded & decrypted |
| File | | Downloaded & decrypted, original filename preserved |
| Quoted message | — | Quoted text appended to |
| Voice-to-text | — | WeChat transcription delivered as |
| Type | Usage |
|---|---|
| Text | Return |
| Image | Return |
| Video | Return |
| File | Return |
| Text + Media | Return both |
| Remote image | Set |
git clone https://github.com/wong2/weixin-agent-sdk
cd weixin-agent-sdk
pnpm install
# Login (scan QR code)
pnpm run login -w packages/example-openai
# Start the OpenAI bot
OPENAI_API_KEY=$OPENAI_API_KEY pnpm run start -w packages/example-openai

<!-- NOTE(review): the fragments below are a garbled troubleshooting/notes section
     from the original page; reconstruct full sentences from the upstream README. -->
errcode -14
npm install silk-wasm
~/.openclaw/
get_updates_buf
login()
~/.openclaw/