mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-28 06:51:16 +08:00
feat: filesystem checkpoints and /rollback command
Automatic filesystem snapshots before destructive file operations, with user-facing rollback. Inspired by PR #559 (by @alireza78a). Architecture: - Shadow git repos at ~/.hermes/checkpoints/{hash}/ via GIT_DIR - CheckpointManager: take/list/restore, turn-scoped dedup, pruning - Transparent — the LLM never sees it, no tool schema, no tokens - Once per turn — only first write_file/patch triggers a snapshot Integration: - Config: checkpoints.enabled + checkpoints.max_snapshots - CLI flag: hermes --checkpoints - Trigger: run_agent.py _execute_tool_calls() before write_file/patch - /rollback slash command in CLI + gateway (list, restore by number) - Pre-rollback snapshot auto-created on restore (undo the undo) Safety: - Never blocks file operations — all errors silently logged - Skips root dir, home dir, dirs >50K files - Disables gracefully when git not installed - Shadow repo completely isolated from project git Tests: 35 new tests, all passing (2798 total suite) Docs: feature page, config reference, CLI commands reference
This commit is contained in:
@@ -771,7 +771,7 @@ class GatewayRunner:
|
||||
_known_commands = {"new", "reset", "help", "status", "stop", "model",
|
||||
"personality", "retry", "undo", "sethome", "set-home",
|
||||
"compress", "usage", "insights", "reload-mcp", "reload_mcp",
|
||||
"update", "title", "resume", "provider"}
|
||||
"update", "title", "resume", "provider", "rollback"}
|
||||
if command and command in _known_commands:
|
||||
await self.hooks.emit(f"command:{command}", {
|
||||
"platform": source.platform.value if source.platform else "",
|
||||
@@ -830,6 +830,9 @@ class GatewayRunner:
|
||||
|
||||
if command == "resume":
|
||||
return await self._handle_resume_command(event)
|
||||
|
||||
if command == "rollback":
|
||||
return await self._handle_rollback_command(event)
|
||||
|
||||
# Skill slash commands: /skill-name loads the skill and sends to agent
|
||||
if command:
|
||||
@@ -1400,6 +1403,7 @@ class GatewayRunner:
|
||||
"`/resume [name]` — Resume a previously-named session",
|
||||
"`/usage` — Show token usage for this session",
|
||||
"`/insights [days]` — Show usage insights and analytics",
|
||||
"`/rollback [number]` — List or restore filesystem checkpoints",
|
||||
"`/reload-mcp` — Reload MCP servers from config",
|
||||
"`/update` — Update Hermes Agent to the latest version",
|
||||
"`/help` — Show this message",
|
||||
@@ -1746,6 +1750,65 @@ class GatewayRunner:
|
||||
f"Cron jobs and cross-platform messages will be delivered here."
|
||||
)
|
||||
|
||||
async def _handle_rollback_command(self, event: MessageEvent) -> str:
|
||||
"""Handle /rollback command — list or restore filesystem checkpoints."""
|
||||
from tools.checkpoint_manager import CheckpointManager, format_checkpoint_list
|
||||
|
||||
# Read checkpoint config from config.yaml
|
||||
cp_cfg = {}
|
||||
try:
|
||||
import yaml as _y
|
||||
_cfg_path = _hermes_home / "config.yaml"
|
||||
if _cfg_path.exists():
|
||||
with open(_cfg_path, encoding="utf-8") as _f:
|
||||
_data = _y.safe_load(_f) or {}
|
||||
cp_cfg = _data.get("checkpoints", {})
|
||||
if isinstance(cp_cfg, bool):
|
||||
cp_cfg = {"enabled": cp_cfg}
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not cp_cfg.get("enabled", False):
|
||||
return (
|
||||
"Checkpoints are not enabled.\n"
|
||||
"Enable in config.yaml:\n```\ncheckpoints:\n enabled: true\n```"
|
||||
)
|
||||
|
||||
mgr = CheckpointManager(
|
||||
enabled=True,
|
||||
max_snapshots=cp_cfg.get("max_snapshots", 50),
|
||||
)
|
||||
|
||||
cwd = os.getenv("MESSAGING_CWD", str(Path.home()))
|
||||
arg = event.get_command_args().strip()
|
||||
|
||||
if not arg:
|
||||
checkpoints = mgr.list_checkpoints(cwd)
|
||||
return format_checkpoint_list(checkpoints, cwd)
|
||||
|
||||
# Restore by number or hash
|
||||
checkpoints = mgr.list_checkpoints(cwd)
|
||||
if not checkpoints:
|
||||
return f"No checkpoints found for {cwd}"
|
||||
|
||||
target_hash = None
|
||||
try:
|
||||
idx = int(arg) - 1
|
||||
if 0 <= idx < len(checkpoints):
|
||||
target_hash = checkpoints[idx]["hash"]
|
||||
else:
|
||||
return f"Invalid checkpoint number. Use 1-{len(checkpoints)}."
|
||||
except ValueError:
|
||||
target_hash = arg
|
||||
|
||||
result = mgr.restore(cwd, target_hash)
|
||||
if result["success"]:
|
||||
return (
|
||||
f"✅ Restored to checkpoint {result['restored_to']}: {result['reason']}\n"
|
||||
f"A pre-rollback snapshot was saved automatically."
|
||||
)
|
||||
return f"❌ {result['error']}"
|
||||
|
||||
async def _handle_compress_command(self, event: MessageEvent) -> str:
|
||||
"""Handle /compress command -- manually compress conversation context."""
|
||||
source = event.source
|
||||
|
||||
@@ -489,6 +489,7 @@ def cmd_chat(args):
|
||||
"query": args.query,
|
||||
"resume": getattr(args, "resume", None),
|
||||
"worktree": getattr(args, "worktree", False),
|
||||
"checkpoints": getattr(args, "checkpoints", False),
|
||||
}
|
||||
# Filter out None values
|
||||
kwargs = {k: v for k, v in kwargs.items() if v is not None}
|
||||
@@ -1927,6 +1928,12 @@ For more help on a command:
|
||||
default=False,
|
||||
help="Run in an isolated git worktree (for parallel agents on the same repo)"
|
||||
)
|
||||
chat_parser.add_argument(
|
||||
"--checkpoints",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Enable filesystem checkpoints before destructive file operations (use /rollback to restore)"
|
||||
)
|
||||
chat_parser.set_defaults(func=cmd_chat)
|
||||
|
||||
# =========================================================================
|
||||
|
||||
24
run_agent.py
24
run_agent.py
@@ -185,6 +185,8 @@ class AIAgent:
|
||||
honcho_session_key: str = None,
|
||||
iteration_budget: "IterationBudget" = None,
|
||||
fallback_model: Dict[str, Any] = None,
|
||||
checkpoints_enabled: bool = False,
|
||||
checkpoint_max_snapshots: int = 50,
|
||||
):
|
||||
"""
|
||||
Initialize the AI Agent.
|
||||
@@ -486,6 +488,13 @@ class AIAgent:
|
||||
# Cached system prompt -- built once per session, only rebuilt on compression
|
||||
self._cached_system_prompt: Optional[str] = None
|
||||
|
||||
# Filesystem checkpoint manager (transparent — not a tool)
|
||||
from tools.checkpoint_manager import CheckpointManager
|
||||
self._checkpoint_mgr = CheckpointManager(
|
||||
enabled=checkpoints_enabled,
|
||||
max_snapshots=checkpoint_max_snapshots,
|
||||
)
|
||||
|
||||
# SQLite session store (optional -- provided by CLI or gateway)
|
||||
self._session_db = session_db
|
||||
if self._session_db:
|
||||
@@ -2706,6 +2715,18 @@ class AIAgent:
|
||||
except Exception as cb_err:
|
||||
logging.debug(f"Tool progress callback error: {cb_err}")
|
||||
|
||||
# Checkpoint: snapshot working dir before file-mutating tools
|
||||
if function_name in ("write_file", "patch") and self._checkpoint_mgr.enabled:
|
||||
try:
|
||||
file_path = function_args.get("path", "")
|
||||
if file_path:
|
||||
work_dir = self._checkpoint_mgr.get_working_dir_for_path(file_path)
|
||||
self._checkpoint_mgr.ensure_checkpoint(
|
||||
work_dir, f"before {function_name}"
|
||||
)
|
||||
except Exception:
|
||||
pass # never block tool execution
|
||||
|
||||
tool_start_time = time.time()
|
||||
|
||||
if function_name == "todo":
|
||||
@@ -3215,6 +3236,9 @@ class AIAgent:
|
||||
self.clear_interrupt()
|
||||
|
||||
while api_call_count < self.max_iterations and self.iteration_budget.remaining > 0:
|
||||
# Reset per-turn checkpoint dedup so each iteration can take one snapshot
|
||||
self._checkpoint_mgr.new_turn()
|
||||
|
||||
# Check for interrupt request (e.g., user sent new message)
|
||||
if self._interrupt_requested:
|
||||
interrupted = True
|
||||
|
||||
385
tests/tools/test_checkpoint_manager.py
Normal file
385
tests/tools/test_checkpoint_manager.py
Normal file
@@ -0,0 +1,385 @@
|
||||
"""Tests for tools/checkpoint_manager.py — CheckpointManager."""
|
||||
|
||||
import os
|
||||
import json
|
||||
import shutil
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
from tools.checkpoint_manager import (
|
||||
CheckpointManager,
|
||||
_shadow_repo_path,
|
||||
_init_shadow_repo,
|
||||
_run_git,
|
||||
_git_env,
|
||||
_dir_file_count,
|
||||
format_checkpoint_list,
|
||||
DEFAULT_EXCLUDES,
|
||||
CHECKPOINT_BASE,
|
||||
)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Fixtures
|
||||
# =========================================================================
|
||||
|
||||
@pytest.fixture()
def work_dir(tmp_path):
    """Throwaway project directory pre-populated with two files."""
    project = tmp_path / "project"
    project.mkdir()
    (project / "main.py").write_text("print('hello')\\n")
    (project / "README.md").write_text("# Project\\n")
    return project
|
||||
|
||||
|
||||
@pytest.fixture()
def checkpoint_base(tmp_path):
    """Per-test checkpoint root so the real ~/.hermes/ is never touched."""
    return tmp_path / "checkpoints"
|
||||
|
||||
|
||||
@pytest.fixture()
def mgr(work_dir, checkpoint_base, monkeypatch):
    """Enabled CheckpointManager writing under the isolated base dir."""
    monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
    return CheckpointManager(enabled=True, max_snapshots=50)
|
||||
|
||||
|
||||
@pytest.fixture()
def disabled_mgr(checkpoint_base, monkeypatch):
    """CheckpointManager with the master switch off."""
    monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
    return CheckpointManager(enabled=False)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Shadow repo path
|
||||
# =========================================================================
|
||||
|
||||
class TestShadowRepoPath:
    """_shadow_repo_path: deterministic mapping from directory → shadow repo."""

    def test_deterministic(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        first = _shadow_repo_path(str(work_dir))
        second = _shadow_repo_path(str(work_dir))
        assert first == second

    def test_different_dirs_different_paths(self, tmp_path, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        path_a = _shadow_repo_path(str(tmp_path / "a"))
        path_b = _shadow_repo_path(str(tmp_path / "b"))
        assert path_a != path_b

    def test_under_checkpoint_base(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        shadow = _shadow_repo_path(str(work_dir))
        # Shadow repos must always live under the configured base.
        assert str(shadow).startswith(str(checkpoint_base))
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Shadow repo init
|
||||
# =========================================================================
|
||||
|
||||
class TestShadowRepoInit:
    """_init_shadow_repo: creation, project isolation, and idempotency."""

    def test_creates_git_repo(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        shadow = _shadow_repo_path(str(work_dir))
        error = _init_shadow_repo(shadow, str(work_dir))
        assert error is None
        assert (shadow / "HEAD").exists()

    def test_no_git_in_project_dir(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        _init_shadow_repo(_shadow_repo_path(str(work_dir)), str(work_dir))
        # The project tree itself must stay free of git state.
        assert not (work_dir / ".git").exists()

    def test_has_exclude_file(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        shadow = _shadow_repo_path(str(work_dir))
        _init_shadow_repo(shadow, str(work_dir))
        exclude_file = shadow / "info" / "exclude"
        assert exclude_file.exists()
        text = exclude_file.read_text()
        assert "node_modules/" in text
        assert ".env" in text

    def test_has_workdir_file(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        shadow = _shadow_repo_path(str(work_dir))
        _init_shadow_repo(shadow, str(work_dir))
        marker = shadow / "HERMES_WORKDIR"
        assert marker.exists()
        assert str(work_dir.resolve()) in marker.read_text()

    def test_idempotent(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        shadow = _shadow_repo_path(str(work_dir))
        first = _init_shadow_repo(shadow, str(work_dir))
        second = _init_shadow_repo(shadow, str(work_dir))
        assert first is None
        assert second is None
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# CheckpointManager — disabled
|
||||
# =========================================================================
|
||||
|
||||
class TestDisabledManager:
    """A disabled manager must be a silent no-op."""

    def test_ensure_checkpoint_returns_false(self, disabled_mgr, work_dir):
        assert disabled_mgr.ensure_checkpoint(str(work_dir)) is False

    def test_new_turn_works(self, disabled_mgr):
        # Must not raise even when the master switch is off.
        disabled_mgr.new_turn()
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# CheckpointManager — taking checkpoints
|
||||
# =========================================================================
|
||||
|
||||
class TestTakeCheckpoint:
    """ensure_checkpoint: snapshot creation, per-turn dedup, guard rails."""

    def test_first_checkpoint(self, mgr, work_dir):
        assert mgr.ensure_checkpoint(str(work_dir), "initial") is True

    def test_dedup_same_turn(self, mgr, work_dir):
        took_first = mgr.ensure_checkpoint(str(work_dir), "first")
        took_second = mgr.ensure_checkpoint(str(work_dir), "second")
        assert took_first is True
        assert took_second is False  # same turn → deduplicated

    def test_new_turn_resets_dedup(self, mgr, work_dir):
        assert mgr.ensure_checkpoint(str(work_dir), "turn 1") is True

        mgr.new_turn()

        # Change a file so the second snapshot has content to commit.
        (work_dir / "main.py").write_text("print('modified')\\n")
        assert mgr.ensure_checkpoint(str(work_dir), "turn 2") is True

    def test_no_changes_skips_commit(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "initial")
        mgr.new_turn()

        # Nothing changed on disk, so no commit should be created.
        assert mgr.ensure_checkpoint(str(work_dir), "no changes") is False

    def test_skip_root_dir(self, mgr):
        assert mgr.ensure_checkpoint("/", "root") is False

    def test_skip_home_dir(self, mgr):
        assert mgr.ensure_checkpoint(str(Path.home()), "home") is False
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# CheckpointManager — listing checkpoints
|
||||
# =========================================================================
|
||||
|
||||
class TestListCheckpoints:
    """list_checkpoints: entry shape and newest-first ordering."""

    def test_empty_when_no_checkpoints(self, mgr, work_dir):
        assert mgr.list_checkpoints(str(work_dir)) == []

    def test_list_after_take(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "test checkpoint")
        entries = mgr.list_checkpoints(str(work_dir))
        assert len(entries) == 1
        entry = entries[0]
        assert entry["reason"] == "test checkpoint"
        for key in ("hash", "short_hash", "timestamp"):
            assert key in entry

    def test_multiple_checkpoints_ordered(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "first")
        mgr.new_turn()

        (work_dir / "main.py").write_text("v2\\n")
        mgr.ensure_checkpoint(str(work_dir), "second")
        mgr.new_turn()

        (work_dir / "main.py").write_text("v3\\n")
        mgr.ensure_checkpoint(str(work_dir), "third")

        entries = mgr.list_checkpoints(str(work_dir))
        assert len(entries) == 3
        # Newest checkpoint comes first.
        assert entries[0]["reason"] == "third"
        assert entries[2]["reason"] == "first"
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# CheckpointManager — restoring
|
||||
# =========================================================================
|
||||
|
||||
class TestRestore:
    """restore: rollback behaviour, bad input, and the safety snapshot."""

    def test_restore_to_previous(self, mgr, work_dir):
        (work_dir / "main.py").write_text("original\\n")
        mgr.ensure_checkpoint(str(work_dir), "original state")
        mgr.new_turn()

        (work_dir / "main.py").write_text("modified\\n")

        entries = mgr.list_checkpoints(str(work_dir))
        assert len(entries) == 1

        outcome = mgr.restore(str(work_dir), entries[0]["hash"])
        assert outcome["success"] is True

        # Rollback brings the file content back to the snapshot state.
        assert (work_dir / "main.py").read_text() == "original\\n"

    def test_restore_invalid_hash(self, mgr, work_dir):
        mgr.ensure_checkpoint(str(work_dir), "initial")
        outcome = mgr.restore(str(work_dir), "deadbeef1234")
        assert outcome["success"] is False

    def test_restore_no_checkpoints(self, mgr, work_dir):
        outcome = mgr.restore(str(work_dir), "abc123")
        assert outcome["success"] is False

    def test_restore_creates_pre_rollback_snapshot(self, mgr, work_dir):
        (work_dir / "main.py").write_text("v1\\n")
        mgr.ensure_checkpoint(str(work_dir), "v1")
        mgr.new_turn()

        (work_dir / "main.py").write_text("v2\\n")

        entries = mgr.list_checkpoints(str(work_dir))
        mgr.restore(str(work_dir), entries[0]["hash"])

        # Restoring records an extra "undo the undo" snapshot first.
        after = mgr.list_checkpoints(str(work_dir))
        assert len(after) >= 2
        assert "pre-rollback" in after[0]["reason"]
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# CheckpointManager — working dir resolution
|
||||
# =========================================================================
|
||||
|
||||
class TestWorkingDirResolution:
    """get_working_dir_for_path: project-root detection heuristics."""

    def test_resolves_git_project_root(self, tmp_path):
        manager = CheckpointManager(enabled=True)
        project = tmp_path / "myproject"
        project.mkdir()
        (project / ".git").mkdir()
        source_dir = project / "src"
        source_dir.mkdir()
        target = source_dir / "main.py"
        target.write_text("x\\n")

        assert manager.get_working_dir_for_path(str(target)) == str(project)

    def test_resolves_pyproject_root(self, tmp_path):
        manager = CheckpointManager(enabled=True)
        project = tmp_path / "pyproj"
        project.mkdir()
        (project / "pyproject.toml").write_text("[project]\\n")
        source_dir = project / "src"
        source_dir.mkdir()

        resolved = manager.get_working_dir_for_path(str(source_dir / "file.py"))
        assert resolved == str(project)

    def test_falls_back_to_parent(self, tmp_path):
        manager = CheckpointManager(enabled=True)
        target = tmp_path / "random" / "file.py"
        target.parent.mkdir(parents=True)
        target.write_text("x\\n")

        # No project marker anywhere → the file's own directory wins.
        assert manager.get_working_dir_for_path(str(target)) == str(target.parent)
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Git env isolation
|
||||
# =========================================================================
|
||||
|
||||
class TestGitEnvIsolation:
    """_git_env: shadow-repo redirection and scrubbing of leaky vars."""

    def test_sets_git_dir(self, tmp_path):
        shadow = tmp_path / "shadow"
        env = _git_env(shadow, str(tmp_path / "work"))
        assert env["GIT_DIR"] == str(shadow)

    def test_sets_work_tree(self, tmp_path):
        shadow = tmp_path / "shadow"
        workdir = tmp_path / "work"
        env = _git_env(shadow, str(workdir))
        assert env["GIT_WORK_TREE"] == str(workdir.resolve())

    def test_clears_index_file(self, tmp_path, monkeypatch):
        # An inherited GIT_INDEX_FILE must never leak into shadow git calls.
        monkeypatch.setenv("GIT_INDEX_FILE", "/some/index")
        env = _git_env(tmp_path / "shadow", str(tmp_path))
        assert "GIT_INDEX_FILE" not in env
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# format_checkpoint_list
|
||||
# =========================================================================
|
||||
|
||||
class TestFormatCheckpointList:
    """format_checkpoint_list: user-facing rendering."""

    def test_empty_list(self):
        assert "No checkpoints" in format_checkpoint_list([], "/some/dir")

    def test_formats_entries(self):
        entries = [
            {"hash": "abc123", "short_hash": "abc1", "timestamp": "2026-03-09T21:15:00-07:00", "reason": "before write_file"},
            {"hash": "def456", "short_hash": "def4", "timestamp": "2026-03-09T21:10:00-07:00", "reason": "before patch"},
        ]
        rendered = format_checkpoint_list(entries, "/home/user/project")
        # Short hashes, reasons, and the /rollback hint must all appear.
        for expected in ("abc1", "def4", "before write_file", "/rollback"):
            assert expected in rendered
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# File count guard
|
||||
# =========================================================================
|
||||
|
||||
class TestDirFileCount:
    """_dir_file_count: entry counting and missing-directory fallback."""

    def test_counts_files(self, work_dir):
        assert _dir_file_count(str(work_dir)) >= 2  # main.py + README.md

    def test_nonexistent_dir(self, tmp_path):
        assert _dir_file_count(str(tmp_path / "nonexistent")) == 0
|
||||
|
||||
|
||||
# =========================================================================
|
||||
# Error resilience
|
||||
# =========================================================================
|
||||
|
||||
class TestErrorResilience:
    """Checkpoint failures must degrade silently, never raise."""

    def test_no_git_installed(self, work_dir, checkpoint_base, monkeypatch):
        monkeypatch.setattr("tools.checkpoint_manager.CHECKPOINT_BASE", checkpoint_base)
        manager = CheckpointManager(enabled=True)
        # Pretend git is missing and force the lazy probe to re-run.
        monkeypatch.setattr("shutil.which", lambda x: None)
        manager._git_available = None
        assert manager.ensure_checkpoint(str(work_dir), "test") is False

    def test_checkpoint_failure_does_not_raise(self, mgr, work_dir, monkeypatch):
        """Checkpoint failures should never raise — they're silently logged."""
        def exploding_git(*args, **kwargs):
            raise OSError("git exploded")

        monkeypatch.setattr("tools.checkpoint_manager._run_git", exploding_git)
        # Must swallow the error and report "no checkpoint taken".
        assert mgr.ensure_checkpoint(str(work_dir), "test") is False
|
||||
441
tools/checkpoint_manager.py
Normal file
441
tools/checkpoint_manager.py
Normal file
@@ -0,0 +1,441 @@
|
||||
"""
|
||||
Checkpoint Manager — Transparent filesystem snapshots via shadow git repos.
|
||||
|
||||
Creates automatic snapshots of working directories before file-mutating
|
||||
operations (write_file, patch), triggered once per conversation turn.
|
||||
Provides rollback to any previous checkpoint.
|
||||
|
||||
This is NOT a tool — the LLM never sees it. It's transparent infrastructure
|
||||
controlled by the ``checkpoints`` config flag or ``--checkpoints`` CLI flag.
|
||||
|
||||
Architecture:
|
||||
~/.hermes/checkpoints/{sha256(abs_dir)[:16]}/ — shadow git repo
|
||||
HEAD, refs/, objects/ — standard git internals
|
||||
HERMES_WORKDIR — original dir path
|
||||
info/exclude — default excludes
|
||||
|
||||
The shadow repo uses GIT_DIR + GIT_WORK_TREE so no git state leaks
|
||||
into the user's project directory.
|
||||
"""
|
||||
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Set
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
CHECKPOINT_BASE = Path(os.getenv("HERMES_HOME", Path.home() / ".hermes")) / "checkpoints"
|
||||
|
||||
DEFAULT_EXCLUDES = [
|
||||
"node_modules/",
|
||||
"dist/",
|
||||
"build/",
|
||||
".env",
|
||||
".env.*",
|
||||
".env.local",
|
||||
".env.*.local",
|
||||
"__pycache__/",
|
||||
"*.pyc",
|
||||
"*.pyo",
|
||||
".DS_Store",
|
||||
"*.log",
|
||||
".cache/",
|
||||
".next/",
|
||||
".nuxt/",
|
||||
"coverage/",
|
||||
".pytest_cache/",
|
||||
".venv/",
|
||||
"venv/",
|
||||
".git/",
|
||||
]
|
||||
|
||||
# Git subprocess timeout (seconds).
|
||||
_GIT_TIMEOUT: int = max(10, min(60, int(os.getenv("HERMES_CHECKPOINT_TIMEOUT", "30"))))
|
||||
|
||||
# Max files to snapshot — skip huge directories to avoid slowdowns.
|
||||
_MAX_FILES = 50_000
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Shadow repo helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _shadow_repo_path(working_dir: str) -> Path:
    """Map a working directory to its shadow repo location.

    The mapping is a stable content hash of the absolute path:
    sha256(abs_path)[:16] under CHECKPOINT_BASE.
    """
    resolved = Path(working_dir).resolve()
    digest = hashlib.sha256(str(resolved).encode()).hexdigest()
    return CHECKPOINT_BASE / digest[:16]
|
||||
|
||||
|
||||
def _git_env(shadow_repo: Path, working_dir: str) -> dict:
|
||||
"""Build env dict that redirects git to the shadow repo."""
|
||||
env = os.environ.copy()
|
||||
env["GIT_DIR"] = str(shadow_repo)
|
||||
env["GIT_WORK_TREE"] = str(Path(working_dir).resolve())
|
||||
env.pop("GIT_INDEX_FILE", None)
|
||||
env.pop("GIT_NAMESPACE", None)
|
||||
env.pop("GIT_ALTERNATE_OBJECT_DIRECTORIES", None)
|
||||
return env
|
||||
|
||||
|
||||
def _run_git(
    args: List[str],
    shadow_repo: Path,
    working_dir: str,
    timeout: int = _GIT_TIMEOUT,
) -> tuple:
    """Execute ``git <args>`` against the shadow repo.

    Returns ``(ok, stdout, stderr)`` with outputs stripped; never raises —
    timeouts, a missing git binary, and any other failure come back as
    ``(False, "", <message>)``.
    """
    env = _git_env(shadow_repo, working_dir)
    try:
        proc = subprocess.run(
            ["git", *args],
            capture_output=True,
            text=True,
            timeout=timeout,
            env=env,
            cwd=str(Path(working_dir).resolve()),
        )
    except subprocess.TimeoutExpired:
        return False, "", f"git timed out after {timeout}s: git {' '.join(args)}"
    except FileNotFoundError:
        return False, "", "git not found"
    except Exception as exc:
        return False, "", str(exc)
    return proc.returncode == 0, proc.stdout.strip(), proc.stderr.strip()
|
||||
|
||||
|
||||
def _init_shadow_repo(shadow_repo: Path, working_dir: str) -> Optional[str]:
    """Create the shadow repo on first use; no-op if it already exists.

    Returns an error message on failure, ``None`` on success.
    """
    if (shadow_repo / "HEAD").exists():
        return None  # already initialised

    shadow_repo.mkdir(parents=True, exist_ok=True)

    ok, _, err = _run_git(["init"], shadow_repo, working_dir)
    if not ok:
        return f"Shadow repo init failed: {err}"

    # Commit identity for snapshot commits — scoped to the shadow repo,
    # never the user's global git config.
    _run_git(["config", "user.email", "hermes@local"], shadow_repo, working_dir)
    _run_git(["config", "user.name", "Hermes Checkpoint"], shadow_repo, working_dir)

    # Default excludes keep bulky and secret paths out of every snapshot.
    info_dir = shadow_repo / "info"
    info_dir.mkdir(exist_ok=True)
    (info_dir / "exclude").write_text(
        "\n".join(DEFAULT_EXCLUDES) + "\n", encoding="utf-8"
    )

    # Record the original working dir so the repo can be mapped back.
    (shadow_repo / "HERMES_WORKDIR").write_text(
        str(Path(working_dir).resolve()) + "\n", encoding="utf-8"
    )

    logger.debug("Initialised checkpoint repo at %s for %s", shadow_repo, working_dir)
    return None
|
||||
|
||||
|
||||
def _dir_file_count(path: str) -> int:
    """Rough entry count under ``path``; bails out once _MAX_FILES is exceeded.

    Counts every directory entry (files and dirs alike). Permission and OS
    errors yield a partial count — this is only a size guard, not an audit.
    """
    total = 0
    try:
        for _entry in Path(path).rglob("*"):
            total += 1
            if total > _MAX_FILES:
                break  # over the guard threshold — no need to keep walking
    except (PermissionError, OSError):
        pass  # partial count is acceptable
    return total
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CheckpointManager
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class CheckpointManager:
|
||||
"""Manages automatic filesystem checkpoints.
|
||||
|
||||
Designed to be owned by AIAgent. Call ``new_turn()`` at the start of
|
||||
each conversation turn and ``ensure_checkpoint(dir, reason)`` before
|
||||
any file-mutating tool call. The manager deduplicates so at most one
|
||||
snapshot is taken per directory per turn.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
enabled : bool
|
||||
Master switch (from config / CLI flag).
|
||||
max_snapshots : int
|
||||
Keep at most this many checkpoints per directory.
|
||||
"""
|
||||
|
||||
def __init__(self, enabled: bool = False, max_snapshots: int = 50):
    """Create a manager; a disabled manager is an inert no-op."""
    self.enabled = enabled
    self.max_snapshots = max_snapshots
    # Absolute paths already snapshotted during the current turn.
    self._checkpointed_dirs: Set[str] = set()
    # Tri-state: None = git not probed yet; set on first ensure_checkpoint().
    self._git_available: Optional[bool] = None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Turn lifecycle
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def new_turn(self) -> None:
    """Start a fresh turn: every directory may be snapshotted once again."""
    self._checkpointed_dirs.clear()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def ensure_checkpoint(self, working_dir: str, reason: str = "auto") -> bool:
    """Snapshot ``working_dir`` at most once per turn.

    Returns True only when a new checkpoint was actually taken.
    Never raises — failures are logged at debug level and swallowed
    so file operations are never blocked.
    """
    if not self.enabled:
        return False

    # Probe for the git binary once, lazily.
    if self._git_available is None:
        self._git_available = shutil.which("git") is not None
        if not self._git_available:
            logger.debug("Checkpoints disabled: git not found")
    if not self._git_available:
        return False

    abs_dir = str(Path(working_dir).resolve())

    # Refuse to snapshot the filesystem root or the user's home directory.
    if abs_dir in ("/", str(Path.home())):
        logger.debug("Checkpoint skipped: directory too broad (%s)", abs_dir)
        return False

    # One snapshot per directory per turn.
    if abs_dir in self._checkpointed_dirs:
        return False
    self._checkpointed_dirs.add(abs_dir)

    try:
        return self._take(abs_dir, reason)
    except Exception as exc:
        logger.debug("Checkpoint failed (non-fatal): %s", exc)
        return False
|
||||
|
||||
def list_checkpoints(self, working_dir: str) -> List[Dict]:
    """List available checkpoints for a directory.

    Args:
        working_dir: Directory whose shadow repo is queried.

    Returns:
        A list of dicts with keys ``hash``, ``short_hash``, ``timestamp``
        (ISO-8601 author date) and ``reason``, most recent first, capped at
        ``max_snapshots``. Empty list when no shadow repo exists or the
        git call fails.
    """
    abs_dir = str(Path(working_dir).resolve())
    shadow = _shadow_repo_path(abs_dir)

    # An initialised shadow repo always contains a HEAD file.
    if not (shadow / "HEAD").exists():
        return []

    # Fix: the original issued a second, identical `git log` whose first
    # result was discarded (including a dead `"--all" if False else "HEAD"`
    # expression) — one subprocess call suffices.
    ok, stdout, _ = _run_git(
        ["log", "--format=%H|%h|%aI|%s", "-n", str(self.max_snapshots)],
        shadow, abs_dir,
    )
    if not ok or not stdout:
        return []

    results = []
    for line in stdout.splitlines():
        # Split on at most 3 pipes so the reason may itself contain "|".
        parts = line.split("|", 3)
        if len(parts) == 4:
            results.append({
                "hash": parts[0],
                "short_hash": parts[1],
                "timestamp": parts[2],
                "reason": parts[3],
            })
    return results
|
||||
|
||||
def restore(self, working_dir: str, commit_hash: str) -> Dict:
    """Restore files to a checkpoint state.

    Runs ``git checkout <hash> -- .`` against the shadow repo, which
    rewrites tracked files without moving HEAD — safe and reversible.
    A snapshot of the current state is taken first so the rollback
    itself can be undone.

    Returns:
        Dict with ``success`` plus either ``error`` or the restored
        commit's ``restored_to``/``reason``/``directory``.
    """
    abs_dir = str(Path(working_dir).resolve())
    shadow = _shadow_repo_path(abs_dir)

    if not (shadow / "HEAD").exists():
        return {"success": False, "error": "No checkpoints exist for this directory"}

    # Make sure the requested commit actually exists in the shadow repo.
    exists, _, _ = _run_git(
        ["cat-file", "-t", commit_hash], shadow, abs_dir,
    )
    if not exists:
        return {"success": False, "error": f"Checkpoint '{commit_hash}' not found"}

    # Snapshot the current state before restoring (so you can undo the undo).
    self._take(abs_dir, f"pre-rollback snapshot (restoring to {commit_hash[:8]})")

    checked_out, _, err = _run_git(
        ["checkout", commit_hash, "--", "."],
        shadow, abs_dir, timeout=_GIT_TIMEOUT * 2,
    )
    if not checked_out:
        return {"success": False, "error": f"Restore failed: {err}"}

    # Fetch the checkpoint's commit subject for a friendlier report.
    got_reason, reason_out, _ = _run_git(
        ["log", "--format=%s", "-1", commit_hash], shadow, abs_dir,
    )
    return {
        "success": True,
        "restored_to": commit_hash[:8],
        "reason": reason_out if got_reason else "unknown",
        "directory": abs_dir,
    }
|
||||
|
||||
def get_working_dir_for_path(self, file_path: str) -> str:
    """Map a file path to the directory that should be checkpointed.

    Starting from the path itself (if a directory) or its parent, ascend
    toward the filesystem root until a directory containing a project-root
    marker (.git, pyproject.toml, package.json, ...) is found. When no
    marker exists anywhere on the way up, the starting directory is used.
    """
    resolved = Path(file_path).resolve()
    start = resolved if resolved.is_dir() else resolved.parent

    # Files whose presence marks the root of a project checkout.
    markers = {".git", "pyproject.toml", "package.json", "Cargo.toml",
               "go.mod", "Makefile", "pom.xml", ".hg", "Gemfile"}

    current = start
    while current != current.parent:
        if any((current / marker).exists() for marker in markers):
            return str(current)
        current = current.parent

    # Reached the filesystem root without a marker — fall back to the start.
    return str(start)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _take(self, working_dir: str, reason: str) -> bool:
    """Take a snapshot of *working_dir* in its shadow repo.

    Returns True when a new commit was created; False when the snapshot
    was skipped (huge directory, nothing changed) or any git step failed.
    """
    shadow = _shadow_repo_path(working_dir)

    # Create the shadow repo on first use.
    err = _init_shadow_repo(shadow, working_dir)
    if err:
        logger.debug("Checkpoint init failed: %s", err)
        return False

    # Quick size guard — don't try to snapshot enormous directories.
    if _dir_file_count(working_dir) > _MAX_FILES:
        logger.debug("Checkpoint skipped: >%d files in %s", _MAX_FILES, working_dir)
        return False

    # Stage everything (additions, modifications, deletions).
    ok, _, err = _run_git(
        ["add", "-A"], shadow, working_dir, timeout=_GIT_TIMEOUT * 2,
    )
    if not ok:
        logger.debug("Checkpoint git-add failed: %s", err)
        return False

    # `diff --cached --quiet` exits 0 when the index matches HEAD — i.e.
    # success here means there is nothing new to commit. (The original
    # also bound the unused stdout to a named local; dropped.)
    nothing_staged, _, _ = _run_git(
        ["diff", "--cached", "--quiet"], shadow, working_dir,
    )
    if nothing_staged:
        logger.debug("Checkpoint skipped: no changes in %s", working_dir)
        return False

    # Commit the staged state; the reason becomes the commit subject.
    ok, _, err = _run_git(
        ["commit", "-m", reason, "--allow-empty-message"],
        shadow, working_dir, timeout=_GIT_TIMEOUT * 2,
    )
    if not ok:
        logger.debug("Checkpoint commit failed: %s", err)
        return False

    logger.debug("Checkpoint taken in %s: %s", working_dir, reason)

    # Enforce the retention limit (best-effort).
    self._prune(shadow, working_dir)

    return True
|
||||
|
||||
def _prune(self, shadow_repo: Path, working_dir: str) -> None:
    """Log (but do not delete) when the repo exceeds max_snapshots commits.

    Intentionally does NOT rewrite history: truncating a git lineage
    requires rebase --onto or filter-branch, which is too fragile for a
    silent background feature. The objects are small, git packs them
    efficiently, and list_checkpoints() already caps its output at
    max_snapshots — old snapshots merely linger on disk.
    """
    ok, stdout, _ = _run_git(
        ["rev-list", "--count", "HEAD"], shadow_repo, working_dir,
    )
    if not ok:
        return

    try:
        count = int(stdout)
    except ValueError:
        return

    if count <= self.max_snapshots:
        return

    # Fix: removed a dead `rev-list --reverse --skip=0 --max-count=1`
    # subprocess call whose result (cutoff_hash) was never used.
    logger.debug("Checkpoint repo has %d commits (limit %d)", count, self.max_snapshots)
|
||||
|
||||
|
||||
def format_checkpoint_list(checkpoints: List[Dict], directory: str) -> str:
    """Format a checkpoint list for display to the user.

    Args:
        checkpoints: Entries as returned by ``CheckpointManager.list_checkpoints``
            (keys: hash, short_hash, timestamp, reason), most recent first.
        directory: The directory the checkpoints belong to, for the header.

    Returns:
        A numbered, human-readable listing, or a "none found" message.
    """
    if not checkpoints:
        return f"No checkpoints found for {directory}"

    lines = [f"📸 Checkpoints for {directory}:\n"]
    for i, cp in enumerate(checkpoints, 1):
        # Compress the ISO-8601 author date ("2026-03-10T14:22:33+00:00")
        # down to "2026-03-10 14:22"; non-ISO values pass through untouched.
        ts = cp["timestamp"]
        if "T" in ts:
            ts = ts.split("T")[1].split("+")[0].split("-")[0][:5]  # HH:MM
            date = cp["timestamp"].split("T")[0]
            ts = f"{date} {ts}"
        lines.append(f"  {i}. {cp['short_hash']}  {ts}  {cp['reason']}")

    # Fix: was an f-string with no placeholders — plain literal suffices.
    lines.append("\nUse /rollback <number> to restore, e.g. /rollback 1")
    return "\n".join(lines)
|
||||
@@ -24,6 +24,7 @@ These are commands you run from your shell.
|
||||
| `hermes chat --toolsets "web,terminal"` / `-t` | Use specific toolsets |
|
||||
| `hermes chat --verbose` | Enable verbose/debug output |
|
||||
| `hermes --worktree` / `-w` | Start in an isolated git worktree (for parallel agents) |
|
||||
| `hermes --checkpoints` | Enable filesystem checkpoints before destructive file operations |
|
||||
|
||||
### Provider & Model Management
|
||||
|
||||
@@ -202,6 +203,8 @@ These work in messaging platforms (Telegram, Discord, Slack, WhatsApp) but not t
|
||||
| `/sethome` | Set this chat as the home channel |
|
||||
| `/status` | Show session info |
|
||||
| `/reload-mcp` | Reload MCP servers from config |
|
||||
| `/rollback` | List filesystem checkpoints for the current directory |
|
||||
| `/rollback <N>` | Restore files to checkpoint #N |
|
||||
| `/update` | Update Hermes Agent to the latest version |
|
||||
|
||||
---
|
||||
|
||||
@@ -663,6 +663,16 @@ browser:
|
||||
record_sessions: false # Auto-record browser sessions as WebM videos to ~/.hermes/browser_recordings/
|
||||
```
|
||||
|
||||
## Checkpoints
|
||||
|
||||
Automatic filesystem snapshots before destructive file operations. See the [Checkpoints feature page](/docs/user-guide/features/checkpoints) for details.
|
||||
|
||||
```yaml
|
||||
checkpoints:
|
||||
enabled: false # Enable automatic checkpoints (also: hermes --checkpoints)
|
||||
max_snapshots: 50 # Max checkpoints to keep per directory
|
||||
```
|
||||
|
||||
## Delegation
|
||||
|
||||
Configure subagent behavior for the delegate tool:
|
||||
|
||||
97
website/docs/user-guide/features/checkpoints.md
Normal file
97
website/docs/user-guide/features/checkpoints.md
Normal file
@@ -0,0 +1,97 @@
|
||||
# Filesystem Checkpoints
|
||||
|
||||
Hermes can automatically snapshot your working directory before making file changes, giving you a safety net to roll back if something goes wrong.
|
||||
|
||||
## How It Works
|
||||
|
||||
When enabled, Hermes takes a **one-time snapshot** at the start of each conversation turn before the first file-modifying operation (`write_file` or `patch`). This creates a point-in-time backup you can restore to at any time.
|
||||
|
||||
Under the hood, checkpoints use a **shadow git repository** stored at `~/.hermes/checkpoints/`. This is completely separate from your project's git — no `.git` directory is created in your project, and your own git history is never touched.
|
||||
|
||||
## Enabling Checkpoints
|
||||
|
||||
### Per-session (CLI flag)
|
||||
|
||||
```bash
|
||||
hermes --checkpoints
|
||||
```
|
||||
|
||||
### Permanently (config.yaml)
|
||||
|
||||
```yaml
|
||||
# ~/.hermes/config.yaml
|
||||
checkpoints:
|
||||
enabled: true
|
||||
max_snapshots: 50 # max checkpoints per directory (default: 50)
|
||||
```
|
||||
|
||||
## Rolling Back
|
||||
|
||||
Use the `/rollback` slash command:
|
||||
|
||||
```
|
||||
/rollback # List all available checkpoints
|
||||
/rollback 1 # Restore to checkpoint #1 (most recent)
|
||||
/rollback 3 # Restore to checkpoint #3 (further back)
|
||||
/rollback abc1234 # Restore by git commit hash
|
||||
```
|
||||
|
||||
Example output:
|
||||
|
||||
```
|
||||
📸 Checkpoints for /home/user/project:
|
||||
|
||||
1. abc1234 2026-03-10 14:22 before write_file
|
||||
2. def5678 2026-03-10 14:15 before patch
|
||||
3. ghi9012 2026-03-10 14:08 before write_file
|
||||
|
||||
Use /rollback <number> to restore, e.g. /rollback 1
|
||||
```
|
||||
|
||||
When you restore, Hermes automatically takes a **pre-rollback snapshot** first — so you can always undo your undo.
|
||||
|
||||
## What Gets Checkpointed
|
||||
|
||||
Checkpoints capture the entire working directory (the project root), excluding common large/sensitive patterns:
|
||||
|
||||
- `node_modules/`, `dist/`, `build/`
|
||||
- `.env`, `.env.*`
|
||||
- `__pycache__/`, `*.pyc`
|
||||
- `.venv/`, `venv/`
|
||||
- `.git/`
|
||||
- `.DS_Store`, `*.log`
|
||||
|
||||
## Performance
|
||||
|
||||
Checkpoints are designed to be lightweight:
|
||||
|
||||
- **Once per turn** — only the first file operation triggers a snapshot, not every write
|
||||
- **Skips large directories** — directories with >50,000 files are skipped automatically
|
||||
- **Skips when nothing changed** — if no files were modified since the last checkpoint, no commit is created
|
||||
- **Non-blocking** — if a checkpoint fails for any reason, the file operation proceeds normally
|
||||
|
||||
## How It Determines the Project Root
|
||||
|
||||
When you write to a file like `src/components/Button.tsx`, Hermes walks up the directory tree looking for project markers (`.git`, `pyproject.toml`, `package.json`, `Cargo.toml`, etc.) to find the project root. This ensures the entire project is checkpointed, not just the file's parent directory.
|
||||
|
||||
## Platforms
|
||||
|
||||
Checkpoints work on both:
|
||||
- **CLI** — uses your current working directory
|
||||
- **Gateway** (Telegram, Discord, etc.) — uses `MESSAGING_CWD`
|
||||
|
||||
The `/rollback` command is available on all platforms.
|
||||
|
||||
## FAQ
|
||||
|
||||
**Does this conflict with my project's git?**
|
||||
No. Checkpoints use a completely separate shadow git repository via `GIT_DIR` environment variables. Your project's `.git/` is never touched.
|
||||
|
||||
**How much disk space do checkpoints use?**
|
||||
Git is very efficient at storing diffs. For most projects, checkpoint data is negligible. Old checkpoints are pruned when `max_snapshots` is exceeded.
|
||||
|
||||
**Can I checkpoint without git installed?**
|
||||
No — git must be available on your PATH. If it's not installed, checkpoints silently disable.
|
||||
|
||||
**Can I roll back across sessions?**
|
||||
Yes! Checkpoints persist in `~/.hermes/checkpoints/` and survive across sessions. You can roll back to a checkpoint from yesterday.
|
||||
Reference in New Issue
Block a user