# Context Management
Proactive context window management via token monitoring, intelligent extraction, and selective rehydration. Features predictive budget monitoring, context health indicators, and priority-based retention. Use when approaching token limits or needing to preserve essential context. Complements /transcripts and PreCompact hook with proactive optimization.
npx skill4agent add rysweet/amplihack context-management

User: Check my current token usage

from context_manager import check_context_status
status = check_context_status(current_tokens=<current_count>)
# Returns: ContextStatus with usage percentage and recommendations

User: Create a context snapshot named "auth-implementation"

from context_manager import create_context_snapshot
snapshot = create_context_snapshot(
conversation_data=<conversation_history>,
name="auth-implementation"
)
# Returns: ContextSnapshot with snapshot_id, file_path, and token_count

User: Restore context from snapshot <snapshot_id> at essential level

from context_manager import rehydrate_from_snapshot
context = rehydrate_from_snapshot(
snapshot_id="20251116_143522",
level="essential" # or "standard" or "comprehensive"
)
# Returns: Formatted context text ready to process

User: List my context snapshots

from context_manager import list_context_snapshots
snapshots = list_context_snapshots()
# Returns: List of snapshot metadata dicts

## status

from context_manager import check_context_status
status = check_context_status(current_tokens=750000)
print(f"Usage: {status.percentage}%")
print(f"Status: {status.threshold_status}")
print(f"Recommendation: {status.recommendation}")

Returns a ContextStatus with threshold_status and recommendation fields.

## snapshot

from context_manager import create_context_snapshot
snapshot = create_context_snapshot(
conversation_data=messages,
name="feature-name" # Optional
)
print(f"Snapshot ID: {snapshot.snapshot_id}")
print(f"Token count: {snapshot.token_count}")
print(f"Saved to: {snapshot.file_path}")

Returns a ContextSnapshot; snapshots are stored under ~/.amplihack/.claude/runtime/context-snapshots/.

## rehydrate

from context_manager import rehydrate_from_snapshot
context = rehydrate_from_snapshot(
snapshot_id="20251116_143522",
level="standard" # essential, standard, or comprehensive
)
print(context)  # Display restored context

## list

from context_manager import list_context_snapshots
snapshots = list_context_snapshots()
for snapshot in snapshots:
    print(f"{snapshot['id']}: {snapshot['name']} ({snapshot['size']})")

# The system tracks token burn rate over time
# When checking status, you get predictive insights
status = check_context_status(current_tokens=500000)
# Status includes predictions (when automation is running):
# - Estimated tool uses until 70% threshold
# - Time estimate based on current burn rate
# - Early warning before you hit capacity
# Example output interpretation:
# "At current rate, you'll hit 70% in ~15 tool uses"
# "Consider creating a checkpoint before your next major operation"

| Indicator | Usage % | Recommended Action |
|---|---|---|
| Healthy | 0-30% | Continue normally |
| Monitor | 30-50% | Plan checkpoint |
| Warning | 50-70% | Create snapshot soon |
| Critical | 70%+ | Snapshot immediately |
# In your statusline script, check context health:
# The automation state file contains health status
# Example statusline addition:
if [ -f ".claude/runtime/context-automation-state.json" ]; then
LAST_PCT=$(jq -r '.last_percentage // 0' .claude/runtime/context-automation-state.json)
if [ "$LAST_PCT" -lt 30 ]; then
echo "[CTX:OK]"
elif [ "$LAST_PCT" -lt 50 ]; then
echo "[CTX:WATCH]"
elif [ "$LAST_PCT" -lt 70 ]; then
echo "[CTX:WARN]"
else
echo "[CTX:CRITICAL]"
fi
fi

# Create snapshot with priority awareness
snapshot = create_context_snapshot(
conversation_data=messages,
name='feature-checkpoint'
)
# Essential level (~200 tokens): Only high priority content
# Standard level (~800 tokens): High + medium priority
# Comprehensive level (~1250 tokens): Everything
# Start minimal, upgrade as needed:
context = rehydrate_from_snapshot(snapshot_id, level='essential')

# The automation tracks consumption velocity
# Adaptive checking frequency based on burn rate:
# Low burn rate (< 1K tokens/tool): Check every 50 tools
# Medium burn rate (1-5K tokens/tool): Check every 10 tools
# High burn rate (> 5K tokens/tool): Check every 3 tools
# Critical zone (70%+): Check every tool
# This means:
# - Normal development: Minimal overhead (checks rarely)
# - Large file operations: Increased monitoring
# - Approaching limits: Continuous monitoring

| Burn Rate | Risk Level | Monitoring Frequency |
|---|---|---|
| < 1K/tool | Low | Every 50 tools |
| 1-5K/tool | Medium | Every 10 tools |
| > 5K/tool | High | Every 3 tools |
| Any at 70%+ | Critical | Every tool |
# Automatic snapshot triggers (already implemented):
# - 30% usage: First checkpoint created
# - 40% usage: Second checkpoint created
# - 50% usage: Third checkpoint created (for 1M models)
# For smaller context windows (< 800K):
# - 55% usage: First checkpoint
# - 70% usage: Second checkpoint
# - 85% usage: Urgent checkpoint
# After compaction detected (30% token drop):
# - Automatically rehydrates from most recent snapshot
# - Uses smart level selection based on previous usage

status = check_context_status(current_tokens=current)
if status.threshold_status == 'consider':
# Usage at 70%+ - consider creating snapshot
print("Consider creating a snapshot soon")
elif status.threshold_status == 'recommended':
# Usage at 85%+ - snapshot recommended
create_context_snapshot(messages, name='current-work')
elif status.threshold_status == 'urgent':
# Usage at 95%+ - create snapshot immediately
    create_context_snapshot(messages, name='urgent-backup')

snapshot = create_context_snapshot(
conversation_data=messages,
name='descriptive-name'
)
# Save snapshot ID for later rehydration; see also /transcripts

# Start minimal
context = rehydrate_from_snapshot(
snapshot_id='20251116_143522',
level='essential'
)
# If more context needed, upgrade to standard
context = rehydrate_from_snapshot(
snapshot_id='20251116_143522',
level='standard'
)
# For complete context, use comprehensive
context = rehydrate_from_snapshot(
snapshot_id='20251116_143522',
level='comprehensive'
)

Related files:

- ~/.amplihack/.claude/runtime/context-snapshots/ (snapshot storage)
- ~/.amplihack/.claude/runtime/logs/<session_id>/CONVERSATION_TRANSCRIPT.md
- ~/.amplihack/.claude/tools/amplihack/context_manager.py
- ~/.amplihack/.claude/tools/amplihack/context_automation_hook.py
- ~/.amplihack/.claude/tools/amplihack/hooks/tool_registry.py

status = check_context_status(current_tokens=current)
if status.threshold_status in ['recommended', 'urgent']:
    create_context_snapshot(messages, name='before-refactoring')

# Pausing work on Feature A
create_context_snapshot(messages, name='feature-a-paused')
# [... work on Feature B ...]
# Resume Feature A later
context = rehydrate_from_snapshot('feature-a-snapshot-id', level='standard')

snapshot = create_context_snapshot(
messages,
name='handoff-to-alice-api-work'
)
# Share snapshot ID with teammate
# Alice can rehydrate and continue work

Related files:

- ~/.amplihack/.claude/tools/amplihack/context_manager.py
- ~/.amplihack/.claude/tools/amplihack/context_automation_hook.py
- ~/.amplihack/.claude/context/PHILOSOPHY.md
- ~/.amplihack/.claude/context/PATTERNS.md

See context_manager.py for implementation details.