Loading...
Loading...
Core patterns for AI coding agents based on analysis of Claude Code, Codex, Cline, Aider, OpenCode. Triggers when: Building an AI coding agent or assistant, implementing tool-calling loops, managing context windows for LLMs, setting up agent memory or skill systems, or designing multi-provider LLM abstraction. Capabilities: Core agent loop with while(true) and tool execution, context management with pruning and compression and repo maps, tool safety with sandboxing and approval flows and doom loop detection, multi-provider abstraction with unified API for different LLMs, memory systems with project rules and auto-memory and skill loading, session persistence with SQLite vs JSONL patterns.
def compress_context(history: list, budget: int, *, threshold: float = 0.8, keep_recent: int = 10) -> list:
    """Compress conversation history when approaching the context limit.

    Keeps the most recent turns verbatim and replaces everything older
    with an LLM-generated summary injected as a system message.

    Args:
        history: Chronological list of message dicts.
        budget: Token budget for the context window.
        threshold: Fraction of the budget at which compression kicks in.
        keep_recent: Number of most recent turns kept verbatim.

    Returns:
        The original history when under the threshold (or too short to
        compress), otherwise [summary system message] + recent turns.
    """
    usage = count_tokens(history)
    if usage < budget * threshold:
        return history
    recent = history[-keep_recent:]
    older = history[:-keep_recent]
    # Nothing old enough to summarize: summarizing [] wastes an LLM call.
    if not older:
        return history
    summary = llm.summarize(older)
    return [{"role": "system", "content": f"Previous context summary:\n{summary}"}] + recent
def build_repo_map(repo_path: Path) -> str:
    """Build a compact 'map' of the codebase: signatures only, not bodies.

    Walks every .py file under repo_path and records its extracted class
    names, function signatures and imports — a context-window-friendly
    alternative to shipping the full source to the model.

    Args:
        repo_path: Root directory of the repository.

    Returns:
        Newline-joined "path:\\nsignatures" entries, one per file.
    """
    map_lines = []
    # sorted() makes the map deterministic across runs and platforms;
    # the original also imported tree_sitter without using it (removed).
    for file in sorted(repo_path.rglob("*.py")):
        signatures = extract_signatures(file)
        map_lines.append(f"{file}:\n{signatures}")
    return "\n".join(map_lines)  # Much smaller than full code
fn sandbox_restrict(allowed_paths: &[PathBuf]) -> Result<()> {
// Limit file access to allowed paths
// Block dangerous syscalls
// Three modes: suggest-only, auto-edit, full-auto
}TOOL_TIERS = {
# Safety tier per tool: gates how much ceremony precedes execution.
TOOL_TIERS = {
    "read": "safe",             # No approval needed
    "write": "needs_approval",  # User confirms
    "bash": "restricted",       # Blacklist + approval
}
def execute_tool(name: str, args: dict) -> Result:
    """Run a tool after enforcing its safety tier.

    Tiers (see TOOL_TIERS): "safe" runs directly, "needs_approval" asks
    the user first, "restricted" is blacklist-checked and then asks the
    user (the tier table documents restricted as "Blacklist + approval",
    but the original skipped the approval step).

    Args:
        name: Tool identifier, looked up in TOOL_TIERS.
        args: Tool arguments; shown to the user and blacklist-checked.

    Returns:
        The tool's Result, a cancelled Result if the user declined, or an
        error Result if a restricted command hit the blacklist.
    """
    # Fail closed: a tool with no registered tier must not run unreviewed
    # (the original defaulted unknown tools to "safe").
    tier = TOOL_TIERS.get(name, "needs_approval")
    if tier == "restricted" and is_dangerous(args):
        return Result(error="Command blocked")
    if tier in ("needs_approval", "restricted"):
        if not user_approves(name, args):
            return Result(cancelled=True)
    return run_tool(name, args)
def detect_doom_loop(history: list) -> bool:
    """Return True when the agent appears stuck repeating one action.

    A doom loop is flagged when the three most recent turns invoked the
    same tool with identical arguments — the caller should pause and ask
    the user how to proceed.
    """
    window = history[-3:]
    if len(window) < 3:
        return False
    # Same tool + same args three times in a row means the agent is stuck.
    return True if all_same_tool_and_args(window) else False
class BaseLLMClient(ABC):
    """Provider-agnostic LLM interface: one-shot chat and streaming chat."""

    @abstractmethod
    def chat(self, messages: list, tools: list) -> Response:
        """Return one complete model response for messages + tool schemas."""
        ...

    @abstractmethod
    def chat_stream(self, messages: list, tools: list) -> Iterator[Chunk]:
        """Yield incremental response chunks for messages + tool schemas."""
        ...
class OpenAIClient(BaseLLMClient):
    """LLM client backed by the OpenAI chat-completions endpoint."""

    def chat(self, messages, tools):
        # Bundle the request, then forward it to the chat-completions API.
        request = dict(model=self.model, messages=messages, tools=tools)
        return self.client.chat.completions.create(**request)
class AnthropicClient(BaseLLMClient):
    """LLM client backed by the Anthropic Messages API."""

    def chat(self, messages, tools):
        # Anthropic's Messages API takes the same (model, messages, tools) shape.
        request = dict(model=self.model, messages=messages, tools=tools)
        return self.client.messages.create(**request)
def get_client(provider: str, model: str) -> BaseLLMClient:
    """Instantiate the LLM client for a provider.

    Args:
        provider: One of "openai", "anthropic", "ollama".
        model: Model identifier passed to the client constructor.

    Returns:
        A concrete BaseLLMClient for the provider.

    Raises:
        ValueError: If the provider is unknown (the original leaked an
            opaque KeyError from the dict lookup).
    """
    clients = {
        "openai": OpenAIClient,
        "anthropic": AnthropicClient,
        "ollama": OllamaClient,
    }
    try:
        client_cls = clients[provider]
    except KeyError:
        supported = ", ".join(sorted(clients))
        raise ValueError(f"Unknown provider {provider!r}; expected one of: {supported}") from None
    return client_cls(model)
async def agent_loop_with_retry(max_retries: int = 32):
    """Run agent_loop, retrying on transient failures.

    Retry policy:
      - RateLimitError: exponential backoff (capped at 1h) before retrying.
      - AuthError: rotate the API key, retry immediately (inner retry).
      - ContextOverflowError: compress context, retry (middle retry).
      - NetworkError: retry immediately.
      - FatalError: rebuild the session from scratch (outer retry).

    Returns:
        Whatever agent_loop returns on the first successful attempt.

    Raises:
        RuntimeError: When all max_retries attempts fail (the original
            fell off the loop and silently returned None).
    """
    for attempt in range(max_retries):
        try:
            return await agent_loop()
        except RateLimitError:
            # Cap the backoff: uncapped, 60 * 2**31 seconds is effectively forever.
            await sleep(min(60 * (2 ** attempt), 3600))
        except AuthError:
            rotate_api_key()  # Inner retry
        except ContextOverflowError:
            # NOTE(review): compress_context elsewhere takes (history, budget);
            # confirm this zero-arg call site is a different helper.
            compress_context()  # Middle retry
        except NetworkError:
            continue  # Immediate retry
        except FatalError:
            rebuild_session()  # Outer retry
    raise RuntimeError(f"agent_loop failed after {max_retries} attempts")
def save_session(session_id: str, event: dict) -> None:
    """Append one event to the session's JSONL log (one JSON object per line).

    Args:
        session_id: Session identifier; names the log file.
        event: JSON-serializable event payload.
    """
    log_file = Path.home() / ".agent" / "sessions" / f"{session_id}.jsonl"
    # The original raised FileNotFoundError on a fresh install where
    # ~/.agent/sessions does not exist yet.
    log_file.parent.mkdir(parents=True, exist_ok=True)
    with open(log_file, "a", encoding="utf-8") as f:
        f.write(json.dumps(event) + "\n")
def load_session(session_id: str) -> list:
    """Load all events from a session's JSONL log.

    Args:
        session_id: Session identifier; names the log file.

    Returns:
        Events in append order; [] when the session has no log yet
        (the original raised FileNotFoundError instead).
    """
    log_file = Path.home() / ".agent" / "sessions" / f"{session_id}.jsonl"
    if not log_file.exists():
        return []
    with open(log_file, encoding="utf-8") as f:
        return [json.loads(line) for line in f]
def init_shadow_git(project_path: Path) -> None:
    """Create a hidden git repo used as an undo history for agent edits.

    Args:
        project_path: Root of the project being shadowed.
    """
    shadow_path = project_path / ".agent-shadow-git"
    # `git init` with a nonexistent cwd fails; create the directory first.
    shadow_path.mkdir(parents=True, exist_ok=True)
    run(["git", "init"], cwd=shadow_path)
def snapshot_after_tool(shadow_path: Path):
    """Auto-commit the working tree after each tool execution."""
    # Stage everything, then record the snapshot commit.
    for cmd in (["git", "add", "-A"], ["git", "commit", "-m", "snapshot"]):
        run(cmd, cwd=shadow_path)
def undo_to_snapshot(shadow_path: Path, commit_hash: str):
    """Restore the shadow repo's working tree to a previous snapshot commit."""
    checkout = ["git", "checkout", commit_hash]
    run(checkout, cwd=shadow_path)
├── skills/
│ ├── git-release/
│ │ └── SKILL.md
│ └── code-review/
│ └── SKILL.md
└── agents/
└── reviewer.md # Specialized agent definition<!-- .opencode/agents/reviewer.md -->
---
description: Code review agent, read-only
mode: subagent
tools:
write: false
edit: false
---
You are a code review expert. Analyze code, suggest improvements, never modify files.def learn_from_correction(user_feedback: str, context: dict):
def learn_from_correction(user_feedback: str, context: dict) -> None:
    """Persist a user correction so future sessions can honor it.

    Args:
        user_feedback: The correction text from the user.
        context: Arbitrary context captured alongside the feedback.
    """
    memory_file = Path.home() / ".claude" / "auto_memory.json"
    memory_file.parent.mkdir(parents=True, exist_ok=True)
    # First run: no memory file yet — start from an empty list instead of
    # crashing in json.loads (the original assumed the file existed).
    memories = json.loads(memory_file.read_text()) if memory_file.exists() else []
    memories.append({
        "feedback": user_feedback,
        "context": context,
        "timestamp": datetime.now().isoformat(),
    })
    memory_file.write_text(json.dumps(memories, indent=2))
def build_system_prompt() -> str:
    """Return a system-prompt fragment with learned user preferences, or ''."""
    memory_file = Path.home() / ".claude" / "auto_memory.json"
    # No stored memories yet: contribute nothing to the prompt.
    if not memory_file.exists():
        return ""
    stored = json.loads(memory_file.read_text())
    return f"User preferences:\n{format_memories(stored)}"