# Remembering — Advanced Memory Operations
Advanced memory operations reference. Basic patterns (profile loading, simple recall/remember) are in project instructions. Consult this skill for background writes, memory versioning, complex queries, edge cases, session scoping, retention management, type-safe results, proactive memory hints, GitHub access detection, and ops priority ordering.
Install: `npx skill4agent add oaustegard/claude-skills remembering`

## Storage

| Table | Purpose | Growth |
|---|---|---|
| config | Stable operational state (profile + ops + journal) | Small, mostly static |
| memories | Timestamped observations | Unbounded |

<!-- NOTE(review): table names in column 1 were lost in extraction; reconstructed from the config_*/remember APIs below — verify against the skill source -->
```python
from scripts import boot
print(boot())
```

# CAPABILITIES

## Memory Types

| Type | Use For | Defaults |
|---|---|---|
| decision | Explicit choices: prefers X, always/never do Y | conf=0.8 |
| world | External facts: tasks, deadlines, project state | |
| anomaly | Errors, bugs, unexpected behavior | |
| experience | General observations, catch-all | |
| procedure | Workflows, step-by-step processes, decision trees | conf=0.9, priority=1 |

<!-- NOTE(review): type names in column 1 reconstructed from the TYPES set below and the typed examples later in this document -->

from scripts import TYPES  # {'decision', 'world', 'anomaly', 'experience', 'procedure'}

## Procedures

from scripts import remember
# Store a workflow
id = remember(
"Deploy workflow: 1) Run tests 2) Build artifacts 3) Push to staging 4) Smoke test 5) Promote to prod",
"procedure",
tags=["deployment", "workflow"],
)
# Retrieve workflows
procedures = recall(type="procedure", tags=["deployment"])

Procedure memories default to `confidence=0.9` and `priority=1`.

## Background Writes

from scripts import remember, remember_bg, flush
# Blocking write (default)
id = remember("User prefers dark mode", "decision", tags=["ui"], conf=0.9)
# Background write (non-blocking)
remember("Quick note", "world", sync=False)
# Ensure all background writes complete before conversation ends
flush()

## Recall

from scripts import recall
# FTS5 search with BM25 ranking + Porter stemmer
memories = recall("dark mode")
# Filtered queries
decisions = recall(type="decision", conf=0.85, n=20)
tasks = recall("API", tags=["task"], n=15)
urgent = recall(tags=["task", "urgent"], tag_mode="all", n=10)
# Comprehensive retrieval (v4.1.0)
all_memories = recall(fetch_all=True, n=1000) # Get all memories without search filtering
# Time-windowed queries (v4.3.0) - since/until with inclusive bounds
recent = recall("API", since="2025-02-01")
jan_memories = recall(since="2025-01-01", until="2025-01-31T23:59:59Z")
# Multi-tag convenience (v4.3.0)
both = recall(tags_all=["correction", "bsky"]) # AND: must have all tags
either = recall(tags_any=["therapy", "self-improvement"]) # OR: any tag matches
# Wildcard patterns are NOT supported - use fetch_all instead
# recall("*", n=1000) # ❌ Raises ValueError
# recall(fetch_all=True, n=1000) # ✅ Correct approach

Related (collapsed in extraction — verify against the skill source): `MemoryResult`, `m.content`, `m.summary`, `m.conf`, `m.confidence`.

## Decisions with Alternatives

from scripts import remember, get_alternatives
# Store decision with alternatives considered
id = remember(
"Chose PostgreSQL for the database",
"decision",
tags=["architecture", "database"],
alternatives=[
{"option": "MongoDB", "rejected": "Schema-less adds complexity for our relational data"},
{"option": "SQLite", "rejected": "Doesn't support concurrent writes at our scale"},
]
)
# Later: retrieve what was considered
alts = get_alternatives(id)
for alt in alts:
    print(f"Rejected {alt['option']}: {alt.get('rejected', 'no reason')}")

Related (collapsed in extraction — verify against the skill source): `refs`, `alternatives`, `MemoryResult`.

## Reference Chains

from scripts import get_chain
# Follow refs up to 3 levels deep (default)
chain = get_chain("memory-uuid", depth=3)
for m in chain:
print(f"[depth={m['_chain_depth']}] {m['summary'][:80]}")
# Useful for understanding supersede chains, consolidated memory origins, etc.

## Batch Operations

from scripts import recall_batch, remember_batch
# Multiple searches in one call (uses server-side FTS5 with BM25 ranking)
results = recall_batch(["architecture", "turso", "FTS5"], n=5)
for i, result_set in enumerate(results):
print(f"Query {i}: {len(result_set)} results")
# Multiple stores in one call
ids = remember_batch([
{"what": "User prefers dark mode", "type": "decision", "tags": ["ui"]},
{"what": "Project uses React", "type": "world", "tags": ["tech"]},
{"what": "Found auth bug", "type": "anomaly", "conf": 0.7},
])

Related (collapsed in extraction — verify against the skill source): `recall_batch()`, `recall()`, `remember_batch()`, `{"error": str}`.

## Forgetting and Versioning

from scripts import forget, supersede
# Soft delete (sets deleted_at, excluded from queries)
forget("memory-uuid")
# Version without losing history
supersede(original_id, "User now prefers Python 3.12", "decision", conf=0.9)

## Config

from scripts import config_get, config_set, config_delete, config_list, profile, ops
# Read
config_get("identity") # Single key
profile() # All profile entries
ops() # All ops entries
config_list() # Everything
# Write
config_set("new-key", "value", "profile") # Category: 'profile', 'ops', or 'journal'
config_set("bio", "Short bio here", "profile", char_limit=500) # Enforce max length
config_set("core-rule", "Never modify this", "ops", read_only=True) # Mark immutable
# Delete
config_delete("old-key")

## Journal

from scripts import journal, journal_recent, journal_prune
# Record what happened this interaction
journal(
topics=["project-x", "debugging"],
user_stated="Will review PR tomorrow",
my_intent="Investigating memory leak"
)
# Boot: load recent entries for context
for entry in journal_recent(10):
print(f"[{entry['t'][:10]}] {entry.get('topics', [])}: {entry.get('my_intent', '')}")
# Maintenance: keep last 40 entries
pruned = journal_prune(keep=40)

Pair `remember(..., sync=False)` with a final `flush()`:

## Flushing Background Writes

from scripts import remember, flush
remember("Derived insight", "experience", sync=False)
remember("Another note", "world", sync=False)
# Before conversation ends:
flush()  # Blocks until all background writes finish

Related identifiers and notes (collapsed in extraction — verify against the skill source): `remember_bg()`, `remember(..., sync=False)`, `MemoryResult`, `List([])`, `tags=["task"]`, `["task", "urgent"]`, `decision`, `ValueError`, `tag_mode="all"`, `tag_mode="any"`, `expansion_threshold`, `expansion_threshold=0`. Config file locations: `/mnt/project/turso.env`, `/mnt/project/muninn.env`, `~/.muninn/.env`, `scripts/defaults/`.

## Sessions

from scripts import session_save, session_resume, sessions
# Save a checkpoint before ending session
session_save("Implementing FTS5 search", context={"files": ["cache.py"], "status": "in-progress"})
# In a new session: resume from last checkpoint
checkpoint = session_resume("previous-session-id")
print(checkpoint['summary']) # What was happening
print(checkpoint['context']) # Custom context data
print(len(checkpoint['recent_memories'])) # Recent memories from that session
# List available session checkpoints
for s in sessions():
    print(f"{s['session_id']}: {s['summary'][:60]}")

## Consolidation

from scripts import consolidate
# Preview what would be consolidated
result = consolidate(dry_run=True)
for c in result['clusters']:
print(f"Tag '{c['tag']}': {c['count']} memories")
# Actually consolidate (creates summaries, demotes originals to background)
result = consolidate(dry_run=False, min_cluster=3)
print(f"Consolidated {result['consolidated']} clusters, demoted {result['demoted']} memories")
# Scope to specific tags
result = consolidate(tags=["debugging"], dry_run=False)

Related (collapsed in extraction — verify against the skill source): `min_cluster`, `type=world`, `consolidated`, `priority=-1`, `refs`.

## Therapy Reflection

from scripts import therapy_reflect
# Preview discovered patterns without creating memories
result = therapy_reflect(dry_run=True)
for c in result['clusters']:
print(f"Pattern ({len(c['source_ids'])} episodes): {c['pattern'][:80]}")
print(f" Common tags: {c['tags']}")
# Create semantic memories from patterns
result = therapy_reflect(dry_run=False)
print(f"Created {result['created']} pattern memories from {len(result['clusters'])} clusters")

Related identifiers (collapsed in extraction — verify against the skill source): `type=experience`, `recall()`, `type=world`, `reflection`, `cross-episodic`, `refs`, `recall_since`, `recall_between`, `since`, `until`, `strengthen`, `weaken`, `session_save`, `session_resume`, `sessions`, `recall_hints`, `get_alternatives`, `consolidate`, `get_chain`, `recall_batch`, `remember_batch`.