mirror of
https://github.com/NousResearch/hermes-agent.git
synced 2026-05-03 09:17:09 +08:00
feat(plugin): holographic memory store adapted to MemoryProvider interface
Adapts PR #2351 by dusterbloom to use the new MemoryProvider ABC. Core files (store.py, retrieval.py, holographic.py) unchanged from the original PR. The __init__.py register() function now calls ctx.register_memory_provider() instead of ctx.register_tool(). HolographicMemoryProvider implements: - initialize() — creates SQLite DB + FactRetriever - system_prompt_block() — shows fact count when active - prefetch(query) — FTS5 search for turn context - get_tool_schemas() — fact_store (9 actions) + fact_feedback - handle_tool_call() — routes to store/retriever - on_session_end() — auto-extract preferences (opt-in) - on_memory_write() — mirrors builtin memory writes as facts 39 tests (22 HRR math + 17 provider adapter), all passing.
This commit is contained in:
0
tests/plugins/__init__.py
Normal file
0
tests/plugins/__init__.py
Normal file
248
tests/plugins/test_holographic.py
Normal file
248
tests/plugins/test_holographic.py
Normal file
@@ -0,0 +1,248 @@
|
||||
"""Tests for holographic.py — pure HRR math operations.
|
||||
|
||||
All tests are synthetic: no filesystem, no database, no external state.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from unittest.mock import patch
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
# Locate the plugin: a home-directory install wins; otherwise use the
# copy that ships inside the repository.
_plugin_dir = Path.home() / ".hermes" / "plugins" / "hermes-memory-store"
if not _plugin_dir.exists():
    _plugin_dir = Path(__file__).resolve().parents[2] / "plugins" / "hermes-memory-store"
sys.path.insert(0, str(_plugin_dir))
||||
|
||||
from holographic import (
|
||||
_HAS_NUMPY,
|
||||
bind,
|
||||
bundle,
|
||||
bytes_to_phases,
|
||||
encode_atom,
|
||||
encode_fact,
|
||||
encode_text,
|
||||
phases_to_bytes,
|
||||
similarity,
|
||||
snr_estimate,
|
||||
unbind,
|
||||
)
|
||||
|
||||
|
||||
DIM = 256 # Smaller dim for fast tests; math properties hold at any dim.
|
||||
|
||||
|
||||
class TestEncodeAtom:
    """encode_atom: deterministic, seeded phase-vector generation."""

    def test_deterministic(self):
        """Same input always produces the identical vector."""
        first = encode_atom("hello", DIM)
        second = encode_atom("hello", DIM)
        np.testing.assert_array_equal(first, second)

    def test_shape_and_dtype(self):
        """Output is a 1-D float64 vector of the requested dimension."""
        vec = encode_atom("test", DIM)
        assert vec.shape == (DIM,)
        assert vec.dtype == np.float64

    def test_phase_range(self):
        """All phases must be in [0, 2π)."""
        vec = encode_atom("range_check", DIM)
        assert np.all(vec >= 0.0)
        assert np.all(vec < 2.0 * np.pi)

    def test_near_orthogonal(self):
        """Random unrelated words should have near-zero similarity."""
        words = ["apple", "quantum", "bicycle", "telescope", "jazz"]
        encoded = {w: encode_atom(w, DIM) for w in words}
        # Check every unordered pair of distinct words.
        for i in range(len(words)):
            for j in range(i + 1, len(words)):
                sim = similarity(encoded[words[i]], encoded[words[j]])
                assert abs(sim) < 0.15, f"'{words[i]}' vs '{words[j]}': sim={sim:.4f}"
class TestBindUnbind:
    """bind/unbind: phase-addition binding and its exact inverse."""

    def test_roundtrip(self):
        """unbind(bind(a, b), b) should recover a exactly."""
        left = encode_atom("concept_a", DIM)
        right = encode_atom("concept_b", DIM)
        recovered = unbind(bind(left, right), right)
        np.testing.assert_allclose(recovered, left, atol=1e-10)

    def test_commutative(self):
        """bind(a, b) == bind(b, a) — phase addition is commutative."""
        first = encode_atom("alpha", DIM)
        second = encode_atom("beta", DIM)
        np.testing.assert_allclose(bind(first, second), bind(second, first), atol=1e-10)

    def test_bound_dissimilar_to_inputs(self):
        """The bound vector should be quasi-orthogonal to both inputs."""
        dog = encode_atom("dog", DIM)
        cat = encode_atom("cat", DIM)
        combined = bind(dog, cat)
        for component in (dog, cat):
            assert abs(similarity(combined, component)) < 0.15
class TestBundle:
    """bundle: superposition keeps each component recoverable (with decay)."""

    def test_preserves_similarity(self):
        """Bundled vector should be similar to each of its components."""
        components = [encode_atom(f"item_{i}", DIM) for i in range(3)]
        merged = bundle(*components)
        for component in components:
            sim = similarity(merged, component)
            assert sim > 0.2, f"Bundle lost signal: sim={sim:.4f}"

    def test_capacity_degrades(self):
        """Similarity to each component should decrease as more items are added."""
        target = encode_atom("target", DIM)
        sims = [
            similarity(
                bundle(target, *(encode_atom(f"noise_{i}", DIM) for i in range(n - 1))),
                target,
            )
            for n in (2, 5, 10, 20)
        ]
        # Only the overall trend is required; minor non-monotonic wiggles are fine.
        assert sims[0] > sims[-1], f"No degradation: {sims}"
class TestSimilarity:
    """similarity: self-similarity is 1, unrelated vectors hover near 0."""

    def test_identity(self):
        """similarity(a, a) should be exactly 1.0."""
        vec = encode_atom("self", DIM)
        assert similarity(vec, vec) == pytest.approx(1.0)

    def test_orthogonal_near_zero(self):
        """Random vectors should have similarity near 0."""
        sims = [
            similarity(encode_atom(f"rand_a_{i}", DIM), encode_atom(f"rand_b_{i}", DIM))
            for i in range(10)
        ]
        mean_sim = np.mean(sims)
        assert abs(mean_sim) < 0.1, f"Mean similarity too high: {mean_sim:.4f}"
class TestEncodeText:
    """encode_text: bag-of-words encoding over whitespace tokens."""

    def test_order_invariant(self):
        """Bag-of-words should be order-invariant."""
        forward = encode_text("the quick brown fox", DIM)
        shuffled = encode_text("fox brown quick the", DIM)
        assert similarity(forward, shuffled) == pytest.approx(1.0, abs=1e-10)

    def test_similar_texts_high_similarity(self):
        """Texts sharing words should have high similarity."""
        longer = encode_text("the cat sat on the mat", DIM)
        shorter = encode_text("the cat on the mat", DIM)
        sim = similarity(longer, shorter)
        assert sim > 0.5, f"Similar texts low sim: {sim:.4f}"

    def test_empty_text(self):
        """Empty text should return a valid vector (the __hrr_empty__ atom)."""
        assert encode_text("", DIM).shape == (DIM,)
class TestEncodeFact:
    """encode_fact: role-bound composition of content and entity atoms."""

    def test_entity_extraction(self):
        """Unbinding entity from fact should recover content signal."""
        content = "prefers rust for systems programming"

        fact_vec = encode_fact(content, ["peppi"], DIM)
        content_vec = encode_text(content, DIM)

        # Reconstruct the same role atoms encode_fact is expected to use:
        # fact - bind(entity, ROLE_ENTITY) should resemble bind(content, ROLE_CONTENT).
        role_entity = encode_atom("__hrr_role_entity__", DIM)
        role_content = encode_atom("__hrr_role_content__", DIM)
        entity_vec = encode_atom("peppi", DIM)

        # Strip off the entity-role binding to probe what it was combined with.
        probe = unbind(fact_vec, bind(entity_vec, role_entity))

        # The residue should correlate with the content-role binding.
        sim = similarity(probe, bind(content_vec, role_content))
        # At DIM=256, 2-component bundle: SNR≈11, but phase cosine similarity
        # compresses the signal. Noise baseline is ~0.035 std; signal > 0.03.
        assert sim > 0.03, f"Entity extraction failed: sim={sim:.4f}"

    def test_multiple_entities(self):
        """Facts with multiple entities should encode all of them."""
        fact_vec = encode_fact("loves pizza", ["alice", "bob"], DIM)
        assert fact_vec.shape == (DIM,)
        # Both entities should be recoverable (above noise floor); here we only
        # verify each probe yields a well-formed vector.
        role_entity = encode_atom("__hrr_role_entity__", DIM)
        for name in ("alice", "bob"):
            probe = unbind(fact_vec, bind(encode_atom(name, DIM), role_entity))
            assert probe.shape == (DIM,)
class TestSerialization:
    """phases_to_bytes / bytes_to_phases: lossless round-trip."""

    def test_roundtrip(self):
        """bytes_to_phases(phases_to_bytes(v)) should recover v exactly."""
        original = encode_atom("serialize_me", DIM)
        restored = bytes_to_phases(phases_to_bytes(original))
        np.testing.assert_array_equal(original, restored)

    def test_byte_size(self):
        """float64 * dim = 8 * dim bytes."""
        blob = phases_to_bytes(encode_atom("size_check", DIM))
        assert len(blob) == DIM * 8
class TestSNREstimate:
    """snr_estimate: sqrt(dim / n_items) capacity heuristic."""

    def test_formula(self):
        """SNR should match sqrt(dim / n_items)."""
        import math

        for n_items in (4, 256):
            assert snr_estimate(1024, n_items) == pytest.approx(math.sqrt(1024 / n_items))

    def test_empty(self):
        """Zero items → infinite SNR."""
        assert snr_estimate(1024, 0) == float("inf")

    def test_warning_logged(self, caplog):
        """SNR < 2.0 should emit a warning."""
        import logging

        with caplog.at_level(logging.WARNING):
            snr_estimate(4, 4)  # SNR = 1.0
        assert "near capacity" in caplog.text.lower()
class TestNumpyGuard:
    """Every public entry point must refuse to run when numpy is unavailable."""

    def test_raises_without_numpy(self):
        """All public functions should raise RuntimeError when numpy is absent.

        Uses patch.object (already imported at module top, previously unused)
        instead of a manual save/mutate/try/finally, so _HAS_NUMPY is restored
        even if an assertion fails mid-way. The ten copy-pasted raise checks
        are collapsed into a single loop over thunks.
        """
        import holographic

        zeros = np.zeros(DIM)
        # One zero-argument thunk per public function; all share the guard.
        calls = [
            lambda: encode_atom("test", DIM),
            lambda: bind(zeros, zeros),
            lambda: unbind(zeros, zeros),
            lambda: bundle(zeros),
            lambda: similarity(zeros, zeros),
            lambda: encode_text("test", DIM),
            lambda: encode_fact("test", ["e"], DIM),
            lambda: phases_to_bytes(zeros),
            lambda: bytes_to_phases(b"\x00" * DIM * 8),
            lambda: snr_estimate(DIM, 1),
        ]
        with patch.object(holographic, "_HAS_NUMPY", False):
            for call in calls:
                with pytest.raises(RuntimeError, match="numpy is required"):
                    call()
336
tests/plugins/test_holographic_provider.py
Normal file
336
tests/plugins/test_holographic_provider.py
Normal file
@@ -0,0 +1,336 @@
|
||||
"""Tests for the holographic memory MemoryProvider adapter.
|
||||
|
||||
Tests the HolographicMemoryProvider interface — registration, tool handling,
|
||||
prefetch, session end hooks, and memory bridging.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
# Make the plugin directory importable before the module-level imports below.
_plugin_dir = Path(__file__).resolve().parents[2] / "plugins" / "hermes-memory-store"
sys.path.insert(0, str(_plugin_dir))
||||
|
||||
from agent.memory_manager import MemoryManager
|
||||
from agent.builtin_memory_provider import BuiltinMemoryProvider
|
||||
|
||||
|
||||
def _make_provider(tmp_path, config=None):
    """Create an initialized HolographicMemoryProvider with a temp DB.

    Args:
        tmp_path: pytest tmp_path directory that will hold the SQLite file.
        config: optional provider config mapping; it is copied, never mutated.

    Returns:
        The initialized provider instance.
    """
    from importlib import import_module

    # The plugin directory name contains a hyphen, so it cannot be reached by
    # a regular import statement — importlib with the dotted string works.
    # (The previous dead `from plugins import HolographicMemoryProvider` and
    # duplicate sys.path.insert were removed; the module-level path setup
    # already makes the package importable.)
    init_mod = import_module("plugins.hermes-memory-store")

    # Copy the caller's config: the original called setdefault() on the shared
    # dict, leaking the per-test db_path back into the caller's mapping.
    cfg = dict(config) if config else {}
    cfg.setdefault("db_path", str(tmp_path / "test.db"))
    provider = init_mod.HolographicMemoryProvider(config=cfg)
    provider.initialize(session_id="test-session")
    return provider
@pytest.fixture
def provider(tmp_path):
    """Yield an initialized holographic provider, torn down after the test.

    The plugin package is loaded from file under a throwaway module name so
    repeated runs never collide with a real installation. Fixes over the
    original: the duplicated store/retrieval spec-loading boilerplate is a
    loop, and sys.modules cleanup sits in a finally block so a failure during
    setup no longer leaks the throwaway modules into later tests.
    """
    import importlib.util

    sys.path.insert(0, str(_plugin_dir.parent))
    pkg_name = "hermes_memory_store_test"

    def _load(name, path, search=None):
        # Build a module from file and register it in sys.modules up front so
        # intra-package imports resolve; caller decides when to exec.
        spec = importlib.util.spec_from_file_location(
            name, path, submodule_search_locations=search
        )
        mod = importlib.util.module_from_spec(spec)
        sys.modules[name] = mod
        return spec, mod

    try:
        pkg_spec, pkg_mod = _load(
            pkg_name, _plugin_dir / "__init__.py", [str(_plugin_dir)]
        )
        # Submodules must be executed before the package body runs, because
        # __init__.py imports them.
        for sub in ("store", "retrieval"):
            sub_spec, sub_mod = _load(f"{pkg_name}.{sub}", _plugin_dir / f"{sub}.py")
            sub_spec.loader.exec_module(sub_mod)
        pkg_spec.loader.exec_module(pkg_mod)

        p = pkg_mod.HolographicMemoryProvider(
            config={"db_path": str(tmp_path / "test.db")}
        )
        p.initialize(session_id="test-session")
        yield p
        p.shutdown()
    finally:
        # Always purge the throwaway modules, even if setup failed mid-way.
        for key in list(sys.modules):
            if key.startswith(pkg_name):
                del sys.modules[key]
class TestProviderRegistration:
    """Plugin registration wires the provider through ctx.register_memory_provider."""

    def test_register_calls_register_memory_provider(self, tmp_path):
        """register(ctx) should call ctx.register_memory_provider().

        Fixes over the original: sys.modules cleanup is in a finally block
        (a failed assertion previously left the throwaway package behind,
        poisoning later tests), and the duplicated store/retrieval loading
        is collapsed into a loop.
        """
        import importlib.util

        pkg_name = "hermes_memory_store_reg"
        try:
            spec = importlib.util.spec_from_file_location(
                pkg_name,
                _plugin_dir / "__init__.py",
                submodule_search_locations=[str(_plugin_dir)],
            )
            mod = importlib.util.module_from_spec(spec)
            sys.modules[pkg_name] = mod

            # Pre-load the submodules the package body imports.
            for sub in ("store", "retrieval"):
                sub_spec = importlib.util.spec_from_file_location(
                    f"{pkg_name}.{sub}", _plugin_dir / f"{sub}.py"
                )
                sub_mod = importlib.util.module_from_spec(sub_spec)
                sys.modules[f"{pkg_name}.{sub}"] = sub_mod
                sub_spec.loader.exec_module(sub_mod)

            spec.loader.exec_module(mod)

            ctx = MagicMock()
            mod.register(ctx)
            ctx.register_memory_provider.assert_called_once()
            registered = ctx.register_memory_provider.call_args[0][0]
            assert registered.name == "holographic"
        finally:
            # Clean up unconditionally so later tests never see these modules.
            for key in list(sys.modules):
                if key.startswith(pkg_name):
                    del sys.modules[key]
class TestToolHandling:
    """Exercise the fact_store / fact_feedback tool surface end to end."""

    @staticmethod
    def _call(provider, tool, args):
        # Every handler returns a JSON string; decode it once here.
        return json.loads(provider.handle_tool_call(tool, args))

    def test_add_and_search(self, provider):
        """Add a fact via tool call, then search for it."""
        added = self._call(
            provider, "fact_store",
            {"action": "add", "content": "User prefers vim over emacs"},
        )
        assert "fact_id" in added

        found = self._call(
            provider, "fact_store", {"action": "search", "query": "vim emacs"}
        )
        assert found["count"] >= 1
        assert any("vim" in row["content"] for row in found["results"])

    def test_add_and_probe(self, provider):
        """Add facts about an entity, then probe it."""
        for content in ("Peppi uses Rust for systems work", "Peppi prefers Neovim"):
            provider.handle_tool_call("fact_store", {"action": "add", "content": content})

        probed = self._call(provider, "fact_store", {"action": "probe", "entity": "peppi"})
        assert probed["count"] >= 1

    def test_related(self, provider):
        """Test related entity lookup."""
        for content in ("Peppi uses Rust for systems work", "Rust ensures memory safety"):
            provider.handle_tool_call("fact_store", {"action": "add", "content": content})

        related = self._call(provider, "fact_store", {"action": "related", "entity": "rust"})
        assert "results" in related
        assert "count" in related

    def test_reason(self, provider):
        """Test compositional reasoning across entities."""
        for content in ("Peppi uses Rust for backend work", "The backend handles API requests"):
            provider.handle_tool_call("fact_store", {"action": "add", "content": content})

        reasoned = self._call(
            provider, "fact_store", {"action": "reason", "entities": ["peppi", "backend"]}
        )
        assert "results" in reasoned

    def test_feedback(self, provider):
        """Test trust scoring via feedback."""
        added = self._call(
            provider, "fact_store", {"action": "add", "content": "Test feedback fact"}
        )
        outcome = self._call(
            provider, "fact_feedback", {"action": "helpful", "fact_id": added["fact_id"]}
        )
        assert "error" not in outcome

    def test_update_and_remove(self, provider):
        """Test CRUD operations."""
        added = self._call(
            provider, "fact_store", {"action": "add", "content": "Will be updated"}
        )
        fact_id = added["fact_id"]

        updated = self._call(
            provider, "fact_store",
            {"action": "update", "fact_id": fact_id, "content": "Updated content"},
        )
        assert updated["updated"]

        removed = self._call(
            provider, "fact_store", {"action": "remove", "fact_id": fact_id}
        )
        assert removed["removed"]

    def test_all_handlers_return_json(self, provider):
        """Every tool call must return valid JSON."""
        seeded = self._call(
            provider, "fact_store", {"action": "add", "content": "JSON test"}
        )
        fact_id = seeded["fact_id"]

        # One representative call per action.
        calls = (
            ("fact_store", {"action": "search", "query": "JSON"}),
            ("fact_store", {"action": "list"}),
            ("fact_store", {"action": "probe", "entity": "test"}),
            ("fact_store", {"action": "related", "entity": "test"}),
            ("fact_store", {"action": "reason", "entities": ["test"]}),
            ("fact_store", {"action": "contradict"}),
            ("fact_feedback", {"action": "helpful", "fact_id": fact_id}),
        )
        for tool_name, args in calls:
            json.loads(provider.handle_tool_call(tool_name, args))  # must not raise
class TestPrefetch:
    """prefetch() supplies per-turn context from stored facts."""

    def test_prefetch_returns_matching_facts(self, provider):
        """Prefetch should return facts matching the query."""
        provider.handle_tool_call(
            "fact_store", {"action": "add", "content": "The deploy pipeline uses Docker"}
        )
        hit = provider.prefetch("deploy pipeline")
        assert "Docker" in hit or "deploy" in hit

    def test_prefetch_empty_when_no_facts(self, provider):
        """An empty store yields an empty prefetch string."""
        assert provider.prefetch("anything") == ""
class TestSystemPromptBlock:
    """system_prompt_block() reflects the current fact count."""

    def test_empty_when_no_facts(self, provider):
        """No stored facts → no prompt block at all."""
        assert provider.system_prompt_block() == ""

    def test_shows_count_with_facts(self, provider):
        """The block names the provider and reports how many facts exist."""
        for content in ("Fact one", "Fact two"):
            provider.handle_tool_call("fact_store", {"action": "add", "content": content})
        block = provider.system_prompt_block()
        assert "2 facts" in block
        assert "Holographic" in block
class TestSessionEndHook:
    """on_session_end: opt-in preference extraction from user messages."""

    @staticmethod
    def _fact_count(provider):
        # Count facts currently in the store via the list action.
        listed = json.loads(provider.handle_tool_call("fact_store", {"action": "list"}))
        return listed["count"]

    def test_extracts_preferences(self, provider):
        """on_session_end should extract preference patterns."""
        provider._config["auto_extract"] = True
        provider.on_session_end([
            {"role": "user", "content": "I prefer dark mode for all my editors"},
            {"role": "assistant", "content": "Noted, I'll remember that."},
        ])
        assert self._fact_count(provider) >= 1

    def test_skips_when_disabled(self, provider):
        """on_session_end should do nothing when auto_extract is False."""
        provider._config["auto_extract"] = False
        provider.on_session_end([
            {"role": "user", "content": "I prefer dark mode"},
        ])
        assert self._fact_count(provider) == 0

    def test_skips_assistant_messages(self, provider):
        """Only user messages should be scanned."""
        provider._config["auto_extract"] = True
        provider.on_session_end([
            {"role": "assistant", "content": "I prefer to help you with that"},
        ])
        assert self._fact_count(provider) == 0
class TestMemoryBridge:
    """on_memory_write mirrors builtin memory writes into the fact store."""

    def test_mirrors_builtin_writes(self, provider):
        """on_memory_write should store facts from the builtin memory tool."""
        provider.on_memory_write("add", "user", "Timezone: US Pacific")
        found = json.loads(
            provider.handle_tool_call(
                "fact_store", {"action": "search", "query": "timezone pacific"}
            )
        )
        assert found["count"] >= 1
class TestManagerIntegration:
    """The holographic provider cooperates with builtin inside MemoryManager."""

    def test_coexists_with_builtin(self, provider):
        """Holographic provider works alongside builtin in MemoryManager."""
        mgr = MemoryManager()
        mgr.add_provider(BuiltinMemoryProvider())
        mgr.add_provider(provider)

        assert mgr.provider_names == ["builtin", "holographic"]

        # The holographic tools are exposed through the manager.
        tool_names = {schema["name"] for schema in mgr.get_all_tool_schemas()}
        assert "fact_store" in tool_names
        assert "fact_feedback" in tool_names

        # Calls routed through the manager reach the provider.
        added = json.loads(
            mgr.handle_tool_call(
                "fact_store", {"action": "add", "content": "Manager integration test"}
            )
        )
        assert added["status"] == "added"

        # A builtin memory write is mirrored into the fact store.
        mgr.on_memory_write("add", "memory", "Test fact from builtin")
        found = json.loads(
            mgr.handle_tool_call(
                "fact_store", {"action": "search", "query": "test fact builtin"}
            )
        )
        assert found["count"] >= 1
Reference in New Issue
Block a user