Use when caching expensive file-processing results: SHA-256 content-hash keying with a frozen `CacheEntry` and a service-layer wrapper.
```
npx skill4agent add shimo4228/claude-code-learned-skills content-hash-cache-pattern
```

Anti-patterns this skill avoids:

```python
# WRONG: runs the full pipeline every time
def process_file(path: Path) -> Result:
    return expensive_extraction(path)  # Always re-runs

# WRONG: path-based cache (invalidated when the file moves)
cache = {"/path/to/file.pdf": result}  # Path changes → cache miss

# WRONG: cache parameters bolted onto an existing function (violates SRP)
def extract_text(path, *, cache_enabled=False, cache_dir=None):
    if cache_enabled:  # Extraction function now has cache responsibility
        ...
```

Key the cache by file contents, not paths: hash with SHA-256, chunked so large files are never loaded into memory at once.

```python
import hashlib
from pathlib import Path
_HASH_CHUNK_SIZE = 65536 # 64KB chunks for large files
def compute_file_hash(path: Path) -> str:
"""SHA-256 of file contents (chunked for large files)."""
if not path.is_file():
raise FileNotFoundError(f"File not found: {path}")
sha256 = hashlib.sha256()
with open(path, "rb") as f:
while True:
chunk = f.read(_HASH_CHUNK_SIZE)
if not chunk:
break
sha256.update(chunk)
return sha256.hexdigest()from dataclasses import dataclass
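
# ExtractedDocument is never shown in the source; this minimal shape is an
# assumption, carrying only the fields the serializer below touches.
@dataclass(frozen=True, slots=True)
class ExtractedDocument:
    text: str
    chunks: tuple[str, ...]
    file_type: str
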
@dataclass(frozen=True, slots=True)
class CacheEntry:
    file_hash: str
    source_path: str
    document: ExtractedDocument  # The cached result
```

Serialize by hand rather than with `dataclasses.asdict()`: manual mapping keeps full control over the on-disk format and the tuple↔list conversion.

```python
import json
from typing import Any
def _serialize_entry(entry: CacheEntry) -> dict[str, Any]:
"""Manual mapping for full control over serialized format."""
doc = entry.document
return {
"file_hash": entry.file_hash,
"source_path": entry.source_path,
"document": {
"text": doc.text,
"chunks": list(doc.chunks), # tuple → list for JSON
"file_type": doc.file_type,
# ... other fields
},
}
def _deserialize_entry(data: dict[str, Any]) -> CacheEntry:
doc_data = data["document"]
document = ExtractedDocument(
text=doc_data["text"],
chunks=tuple(doc_data["chunks"]), # list → tuple
file_type=doc_data["file_type"],
)
return CacheEntry(
file_hash=data["file_hash"],
source_path=data["source_path"],
document=document,
)# service.py — cache wrapper
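The service wrapper below reads two fields off an `AppConfig` the source never defines; a minimal sketch under that assumption (field names taken from the wrapper, default values hypothetical):

```python
from dataclasses import dataclass

@dataclass(frozen=True, slots=True)
class AppConfig:
    cache_enabled: bool = True
    cache_dir: str = ".cache/extraction"  # hypothetical default
```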
```python
# service.py — cache wrapper
import logging

logger = logging.getLogger(__name__)

def extract_with_cache(file_path: Path, *, config: AppConfig) -> ExtractedDocument:
"""Service layer: cache check → extraction → cache write."""
if not config.cache_enabled:
return extract_text(file_path) # Pure function, no cache knowledge
cache_dir = Path(config.cache_dir)
file_hash = compute_file_hash(file_path)
# Check cache
cached = read_cache(cache_dir, file_hash)
if cached is not None:
logger.info("Cache hit: %s (hash=%s)", file_path.name, file_hash[:12])
return cached.document
# Cache miss → extract → store
logger.info("Cache miss: %s (hash=%s)", file_path.name, file_hash[:12])
doc = extract_text(file_path)
entry = CacheEntry(file_hash=file_hash, source_path=str(file_path), document=doc)
write_cache(cache_dir, entry)
return docdef read_cache(cache_dir: Path, file_hash: str) -> CacheEntry | None:
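    # Cache layout: one JSON file per content hash → O(1) lookup, no index file.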
    cache_file = cache_dir / f"{file_hash}.json"
    if not cache_file.is_file():
        return None
    try:
        raw = cache_file.read_text(encoding="utf-8")
        data = json.loads(raw)
        return _deserialize_entry(data)
    except (json.JSONDecodeError, ValueError, KeyError):
        logger.warning("Corrupted cache entry: %s", cache_file)
        return None  # Treat corruption as cache miss
```

| Choice | Reason |
|---|---|
| SHA-256 content hash | Path-independent, auto-invalidates on content change |
| Hash-named cache files (`{hash}.json`) | O(1) lookup, no index file needed |
| Service layer wrapper | SRP: extraction stays pure, cache is separate concern |
| Manual JSON serialization | Full control over frozen dataclass serialization |
| Corruption → None | Graceful degradation, re-extracts on next run |
| `mkdir` in `write_cache` | Lazy directory creation on first write |
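`write_cache` is called above but not shown in this section; a minimal sketch consistent with `read_cache` and the lazy-directory row in the table (the helper body is an assumption, not the original):

```python
def write_cache(cache_dir: Path, entry: CacheEntry) -> None:
    """Persist an entry as {hash}.json (sketch)."""
    cache_dir.mkdir(parents=True, exist_ok=True)  # Lazy creation on first write
    cache_file = cache_dir / f"{entry.file_hash}.json"
    cache_file.write_text(
        json.dumps(_serialize_entry(entry), ensure_ascii=False),
        encoding="utf-8",
    )
```

With that in place, `extract_with_cache(Path("report.pdf"), config=AppConfig())` re-runs extraction only when the file's contents change.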
Expose `config.cache_enabled` as a `--cache/--no-cache` CLI flag.

Related skills:
- python-immutable-accumulator.md
- backward-compatible-frozen-extension.md
- cost-aware-llm-pipeline.md