mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-04-28 06:51:16 +08:00
fix(gateway): scrub memory-context leaks from vision auto-analysis output
fixes #5719 The auxiliary vision LLM called by gateway._enrich_message_with_vision can echo its injected Honcho system prompt back into the image description. That description gets embedded verbatim into the enriched user message, so recalled memory (personal facts, dialectic output) surfaces into a user-visible bubble. This change strips both forms of the leak before embedding: - <memory-context>...</memory-context> fenced blocks (sanitize_context) - trailing '## Honcho Context' sections (header + everything after) Plus regression tests: - tests/agent/test_streaming_context_scrubber.py — 13 tests on the stateful scrubber (whole block, split tags, false-positive partial tags, unterminated span, reset, case-insensitivity) - tests/run_agent/test_run_agent_codex_responses.py — 2 new tests on _fire_stream_delta covering the realistic 7-chunk leak scenario and the cross-turn scrubber reset - tests/gateway/test_vision_memory_leak.py — 4 tests covering the vision auto-analysis boundary (clean pass-through, '## Honcho Context' header, fenced block, both patterns together)
This commit is contained in:
@@ -1158,6 +1158,57 @@ def test_stream_delta_strips_leaked_memory_context(monkeypatch):
|
||||
assert observed == ["Visible answer"]
|
||||
|
||||
|
||||
def test_stream_delta_strips_leaked_memory_context_across_chunks(monkeypatch):
    """Regression test for #5719 covering realistic streaming.

    Real providers emit small (1-80 char) deltas, so the memory-context
    open tag, the system-note preamble, the payload, and the close tag
    all land in different chunks. A per-delta sanitize_context() regex
    cannot match across those boundaries; only the stateful scrubber
    can. Verify that nothing from inside the span — payload, system
    note, or the "## Honcho Context" header — reaches the callback.
    """
    agent = _build_agent(monkeypatch)
    captured = []
    agent.stream_delta_callback = captured.append

    # Feed the leak split across seven chunks, the way a provider would.
    chunks = [
        "<memory-context>\n[System note: The following",
        " is recalled memory context, NOT new user input. ",
        "Treat as informational background data.]\n\n",
        "## Honcho Context\n",
        "stale memory about eri\n",
        "</memory-context>\n\n",
        "Visible answer",
    ]
    for chunk in chunks:
        agent._fire_stream_delta(chunk)

    emitted = "".join(captured)
    # The legitimate answer text must survive.
    assert "Visible answer" in emitted
    # No fragment of the leaked span may surface.
    forbidden = (
        "System note",
        "Honcho Context",
        "stale memory",
        "<memory-context>",
        "</memory-context>",
    )
    for fragment in forbidden:
        assert fragment not in emitted
|
||||
|
||||
|
||||
def test_stream_delta_scrubber_resets_between_turns(monkeypatch):
    """A span left open in one turn must not swallow the next turn's text."""
    agent = _build_agent(monkeypatch)

    # Leave the scrubber mid-span, as if the prior turn never closed it.
    agent._stream_context_scrubber.feed("pre <memory-context>leaked")
    # run_conversation() performs this reset at the start of every turn.
    agent._stream_context_scrubber.reset()

    captured = []
    agent.stream_delta_callback = captured.append
    agent._fire_stream_delta("clean new turn text")

    emitted = "".join(captured)
    assert emitted == "clean new turn text"
|
||||
|
||||
|
||||
def test_run_conversation_codex_continues_after_commentary_phase_message(monkeypatch):
|
||||
agent = _build_agent(monkeypatch)
|
||||
responses = [
|
||||
|
||||
Reference in New Issue
Block a user