# PinMe LLM (OpenRouter proxy) — Worker TypeScript skill

Use this skill when a PinMe project (Worker TypeScript) needs to call OpenRouter-backed LLM APIs, including model listing, chat/completions, streaming, or OpenRouter web search. It guides an AI to generate correct Worker TS code.

Install: `npx skill4agent add glitternetwork/pinme pinme-llm`

The examples below live in `backend/src/worker.ts`.
export interface Env {
DB: D1Database;
API_KEY: string; // Project API Key from create_worker
PROJECT_NAME: string; // Actual project_name from create_worker; must match API_KEY
BASE_URL?: string; // Optional override for PinMe API base URL, defaults to https://pinme.cloud
}authenticates the Worker to PinMe.API_KEYis required forPROJECT_NAMEand must belong to the same project aschat/completions. WhenAPI_KEYis not set, useBASE_URL.https://pinme.cloud
GET {BASE_URL}/api/v1/modelsX-API-Keyenv.API_KEY/modelsasync function listModels(env: Env): Promise<unknown> {
const baseUrl = env.BASE_URL ?? 'https://pinme.cloud';
const resp = await fetch(`${baseUrl}/api/v1/models`, {
headers: { 'X-API-Key': env.API_KEY },
});
if (!resp.ok) {
throw new Error(await extractPinmeOpenRouterError(resp));
}
return await resp.json();
}POST {BASE_URL}/api/v1/chat/completions?project_name={project_name}X-API-Keyenv.API_KEYstream: trueopenrouter:web_searchtools{
{
  "model": "openai/gpt-4o-mini",
  "messages": [
    { "role": "system", "content": "You are a helpful assistant." },
    { "role": "user", "content": "Hello!" }
  ],
  "stream": true
}

Use `project_name` from `env.PROJECT_NAME`; always URL-encode it in the query string. For available models, call `GET /api/v1/models` or refer to OpenRouter model IDs (see `create_worker`).
openrouter:web_searchchat/completionsmax_resultsmax_total_resultsasync function searchWithLLM(env: Env, query: string): Promise<string> {
const baseUrl = env.BASE_URL ?? 'https://pinme.cloud';
const resp = await fetch(
`${baseUrl}/api/v1/chat/completions?project_name=${encodeURIComponent(env.PROJECT_NAME)}`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': env.API_KEY,
},
body: JSON.stringify({
model: 'openai/gpt-5.2',
messages: [{ role: 'user', content: query }],
tools: [
{
type: 'openrouter:web_search',
parameters: {
engine: 'auto',
max_results: 5,
max_total_results: 10,
},
},
],
}),
},
);
if (!resp.ok) {
throw new Error(await extractPinmeOpenRouterError(resp));
}
const data = await resp.json() as { choices: Array<{ message?: { content?: string } }> };
return data.choices[0]?.message?.content ?? '';
}{
{
  "id": "chatcmpl-...",
  "choices": [{ "message": { "role": "assistant", "content": "Hello!" }, "finish_reason": "stop" }],
  "usage": { "prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15 }
}

Streaming responses arrive as SSE lines:

data: {"choices":[{"delta":{"content":"Hello"}}]}
data: {"choices":[{"delta":{"content":" there"}}]}
data: [DONE]

| HTTP Status | Meaning | data.error Example |
|---|---|---|
| 401 | API Key missing, invalid, or mismatched with project_name | |
| 400 | project_name missing or OpenRouter key not configured | |
| 403 | LLM balance insufficient or disabled | |
| 413 | Request body exceeds 1MB | |
| 500 | Proxy failed before upstream request | |
| 502 | LLM service unavailable | |
async function callLLM(
env: Env,
messages: Array<{ role: string; content: string }>,
model = 'openai/gpt-4o-mini',
): Promise<{ content: string; error?: string }> {
const baseUrl = env.BASE_URL ?? 'https://pinme.cloud';
const resp = await fetch(
`${baseUrl}/api/v1/chat/completions?project_name=${encodeURIComponent(env.PROJECT_NAME)}`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': env.API_KEY,
},
body: JSON.stringify({ model, messages }),
},
);
if (!resp.ok) {
return { content: '', error: await extractPinmeOpenRouterError(resp) };
}
const data = await resp.json() as { choices: Array<{ message: { content: string } }> };
return { content: data.choices[0]?.message?.content || '' };
}
// Usage in routes
async function handleChat(request: Request, env: Env): Promise<Response> {
const { question } = await request.json() as { question: string };
const result = await callLLM(env, [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: question },
]);
if (result.error) {
return json({ error: result.error }, 502);
}
return json({ answer: result.content });
}async function handleChatStream(request: Request, env: Env): Promise<Response> {
const body = await request.text();
const baseUrl = env.BASE_URL ?? 'https://pinme.cloud';
// Ensure stream=true in the request
let parsed = JSON.parse(body);
parsed.stream = true;
const resp = await fetch(
`${baseUrl}/api/v1/chat/completions?project_name=${encodeURIComponent(env.PROJECT_NAME)}`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-API-Key': env.API_KEY,
},
body: JSON.stringify(parsed),
},
);
if (!resp.ok) {
return json({ error: await extractPinmeOpenRouterError(resp) }, resp.status);
}
// Pass through SSE stream directly
return new Response(resp.body, {
status: 200,
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
...CORS_HEADERS,
},
});
}async function streamChat(question: string, onChunk: (text: string) => void): Promise<void> {
const resp = await fetch(getApiUrl('/api/chat/stream'), {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ question }),
});
const reader = resp.body!.getReader();
const decoder = new TextDecoder();
let buffer = '';
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop()!; // Keep incomplete line
for (const line of lines) {
if (!line.startsWith('data: ')) continue;
const payload = line.slice(6);
if (payload === '[DONE]') return;
const chunk = JSON.parse(payload) as { choices: Array<{ delta: { content?: string } }> };
const content = chunk.choices[0]?.delta?.content;
if (content) onChunk(content);
}
}
}/api/v1/models/api/v1/chat/completionsinterface PinmeResponse<T = unknown> {
code: number; // 200=success, other=failure
msg: string; // "ok" | "error" | "invalid params"
data?: T; // Business data on success, may contain { error: string } on failure
}async function extractPinmeOpenRouterError(resp: Response): Promise<string> {
const fallback = `HTTP ${resp.status}`;
try {
const body = await resp.clone().json() as PinmeResponse | { error?: { message?: string } } | { error?: string };
if ('data' in body && body.data && typeof body.data === 'object' && 'error' in body.data) {
return String((body.data as { error: unknown }).error);
}
if ('msg' in body && typeof body.msg === 'string' && body.msg) {
return body.msg;
}
if ('error' in body) {
const error = body.error;
if (typeof error === 'string') return error;
if (error && typeof error === 'object' && 'message' in error) {
return String((error as { message: unknown }).message);
}
}
} catch {
try {
const text = await resp.text();
if (text) return text;
} catch {
// Ignore and return fallback below.
}
}
return fallback;
}POSTasync function callOpenRouterJSON<T>(url: string, apiKey: string, body: unknown): Promise<{ data?: T; error?: string }> {
let resp: Response;
try {
resp = await fetch(url, {
method: 'POST',
headers: { 'Content-Type': 'application/json', 'X-API-Key': apiKey },
body: JSON.stringify(body),
});
} catch {
return { error: 'Network error' };
}
if (!resp.ok) {
return { error: await extractPinmeOpenRouterError(resp) };
}
return { data: await resp.json() as T };
}const baseUrl = env.BASE_URL ?? 'https://pinme.cloud';
// Call LLM (non-streaming)
const llmResult = await callOpenRouterJSON<{ choices: Array<{ message: { content: string } }> }>(
`${baseUrl}/api/v1/chat/completions?project_name=${encodeURIComponent(env.PROJECT_NAME)}`, env.API_KEY,
{ model: 'openai/gpt-4o-mini', messages: [{ role: 'user', content: 'Hi' }] },
);
if (llmResult.error) return json({ error: llmResult.error }, 502);