Freebuff2API
OpenAI-compatible proxy server for Freebuff that translates standard OpenAI API requests into Freebuff's backend format with multi-token rotation and Docker deployment.
npx skill4agent add aradotso/trending-skills freebuff2api-openai-proxy
Skill by ara.so — Daily 2026 Skills collection.
Exposes the standard /v1/chat/completions endpoint.
npm i -g freebuff
freebuff # follow login prompts
Credentials are stored at ~/.config/manicode/credentials.json (Linux/macOS) or C:\Users\<username>\.config\manicode\credentials.json (Windows):
{
"default": {
"authToken": "fa82b5c1-e39d-4c7a-961f-d2b3c4e5f6a7"
}
}
Copy the authToken value and pass it to the proxy via AUTH_TOKENS.
# Single token
docker run -d --name freebuff2api \
-p 8080:8080 \
-e AUTH_TOKENS="$FREEBUFF_TOKEN" \
ghcr.io/quorinex/freebuff2api:latest
# Multiple tokens (comma-separated for higher throughput)
docker run -d --name freebuff2api \
-p 8080:8080 \
-e AUTH_TOKENS="$FREEBUFF_TOKEN_1,$FREEBUFF_TOKEN_2,$FREEBUFF_TOKEN_3" \
ghcr.io/quorinex/freebuff2api:latest
# With HTTP proxy and API key protection
docker run -d --name freebuff2api \
-p 8080:8080 \
-e AUTH_TOKENS="$FREEBUFF_TOKEN" \
-e API_KEYS="$MY_PROXY_API_KEY" \
-e HTTP_PROXY="$HTTP_PROXY_URL" \
  ghcr.io/quorinex/freebuff2api:latest
# docker-compose.yml
version: "3.8"
services:
freebuff2api:
image: ghcr.io/quorinex/freebuff2api:latest
container_name: freebuff2api
restart: unless-stopped
ports:
- "8080:8080"
environment:
AUTH_TOKENS: "${FREEBUFF_TOKENS}"
API_KEYS: "${PROXY_API_KEYS}"
ROTATION_INTERVAL: "6h"
REQUEST_TIMEOUT: "15m"
# Or mount a config file:
# volumes:
# - ./config.json:/app/config.json
    # command: ["-config", "/app/config.json"]
# .env file
FREEBUFF_TOKENS=token1,token2,token3
PROXY_API_KEYS=my-secret-key
git clone https://github.com/Quorinex/Freebuff2API.git
cd Freebuff2API
go build -o freebuff2api .
# Run with config file
./freebuff2api -config config.json
# Run with environment variables
AUTH_TOKENS="$FREEBUFF_TOKEN" ./freebuff2api
docker build -t freebuff2api .
docker run -d -p 8080:8080 -e AUTH_TOKENS="$FREEBUFF_TOKEN" freebuff2api
{
"LISTEN_ADDR": ":8080",
"UPSTREAM_BASE_URL": "https://codebuff.com",
"AUTH_TOKENS": ["token1", "token2"],
"ROTATION_INTERVAL": "6h",
"REQUEST_TIMEOUT": "15m",
"API_KEYS": ["my-proxy-api-key"],
"HTTP_PROXY": ""
}
| Key / Env Var | Default | Description |
|---|---|---|
| LISTEN_ADDR | :8080 | Proxy listen address |
| UPSTREAM_BASE_URL | https://codebuff.com | Freebuff backend URL |
| AUTH_TOKENS | — | Freebuff auth tokens (JSON array or comma-separated) |
| ROTATION_INTERVAL | 6h | How often to rotate tokens |
| REQUEST_TIMEOUT | 15m | Upstream request timeout |
| API_KEYS | (empty) | Client API keys for proxy auth (empty = open access) |
| HTTP_PROXY | (empty) | HTTP proxy for outbound requests |
The proxy listens on http://localhost:8080 by default.
curl http://localhost:8080/v1/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $PROXY_API_KEY" \
-d '{
"model": "claude-3-5-sonnet",
"messages": [{"role": "user", "content": "Hello!"}]
}'
If API_KEYS is empty, no Authorization header is required:
curl http://localhost:8080/v1/chat/completions \
-H "Content-Type: application/json" \
  -d '{"model": "claude-3-5-sonnet", "messages": [{"role": "user", "content": "Hello!"}]}'
from openai import OpenAI
import os
client = OpenAI(
base_url="http://localhost:8080/v1",
api_key=os.environ.get("PROXY_API_KEY", "unused"), # any value if API_KEYS is empty
)
response = client.chat.completions.create(
model="claude-3-5-sonnet",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Explain async/await in Python."},
],
)
print(response.choices[0].message.content)
from openai import OpenAI
import os
client = OpenAI(
base_url="http://localhost:8080/v1",
api_key=os.environ.get("PROXY_API_KEY", "unused"),
)
stream = client.chat.completions.create(
model="claude-3-5-sonnet",
messages=[{"role": "user", "content": "Write a short poem."}],
stream=True,
)
for chunk in stream:
if chunk.choices[0].delta.content is not None:
print(chunk.choices[0].delta.content, end="", flush=True)
print()
import OpenAI from "openai";
const client = new OpenAI({
baseURL: "http://localhost:8080/v1",
apiKey: process.env.PROXY_API_KEY ?? "unused",
});
const response = await client.chat.completions.create({
model: "claude-3-5-sonnet",
messages: [{ role: "user", content: "Hello from Node.js!" }],
});
console.log(response.choices[0].message.content);
from langchain_openai import ChatOpenAI
import os
llm = ChatOpenAI(
model="claude-3-5-sonnet",
openai_api_base="http://localhost:8080/v1",
openai_api_key=os.environ.get("PROXY_API_KEY", "unused"),
)
result = llm.invoke("What is the capital of France?")
print(result.content)
# config.json approach
{
"AUTH_TOKENS": [
"token-account-1",
"token-account-2",
"token-account-3"
],
"ROTATION_INTERVAL": "3h"
}
# Environment variable approach (comma-separated)
export AUTH_TOKENS="token1,token2,token3"
{
"API_KEYS": ["secret-key-for-team", "another-key-for-ci"]
}
Clients then authenticate with: Authorization: Bearer secret-key-for-team
{
"HTTP_PROXY": "http://proxy.company.com:3128"
}
docker run -d --name freebuff2api \
-p 8080:8080 \
-e AUTH_TOKENS="$FREEBUFF_TOKEN" \
-e HTTP_PROXY="$CORPORATE_PROXY" \
  ghcr.io/quorinex/freebuff2api:latest
# Check the proxy is responding
curl -s http://localhost:8080/v1/models | jq .
# Or a minimal chat request
curl -s http://localhost:8080/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{"model":"claude-3-5-sonnet","messages":[{"role":"user","content":"ping"}]}' \
  | jq '.choices[0].message.content'
401 Unauthorized: when API_KEYS is set, requests must include Authorization: Bearer <key>; set API_KEYS to [] to allow open access.
Upstream auth errors: re-run freebuff to refresh your login, then update AUTH_TOKENS.
Timeouts: REQUEST_TIMEOUT defaults to 15m; raise it if needed, e.g. { "REQUEST_TIMEOUT": "30m" }.
Port not reachable: verify the process is running (docker ps, or ps aux | grep freebuff2api), that LISTEN_ADDR is not bound to 127.0.0.1:8080 only, and that the port is published with -p 8080:8080.
# Check logs
docker logs freebuff2api
# Common cause: AUTH_TOKENS not set
docker run --rm -e AUTH_TOKENS="$FREEBUFF_TOKEN" ghcr.io/quorinex/freebuff2api:latest
ROTATION_INTERVAL accepts duration strings such as "1h" or "30m"; the default is "6h".
Freebuff2API/
├── main.go # Entry point, config loading, server startup
├── config.json # Default config file (gitignored if contains secrets)
├── Dockerfile # Multi-arch Docker build
├── go.mod / go.sum # Go module dependencies
├── README.md
└── README_zh.md
Run with a custom config file: ./freebuff2api -config /path/to/config.json
The -config flag accepts an absolute or relative path.