feat(honcho): explain why when honcho_profile returns an empty card

Closed PR #5137 addressed the retrieval path (peer cards via get_card()
instead of the session-scoped lookup that returned empty for per-session
messaging flows) — that architectural fix is already in main as
_fetch_peer_card / _fetch_peer_context.

What never got fixed is the user-visible side: honcho_profile returning
a flat 'No profile facts available yet.' leaves the model to guess at
why. The model then often surfaces the empty result to the user as a cryptic error.

Adds a diagnostic hint next to the existing 'result' message, enumerating
the likely causes in rough order of frequency:

  1. Observation disabled for this peer (user_observe_me/others off)
  2. Peer card hasn't accumulated yet (fresh peer / dialectic cadence
     hasn't fired enough turns — cards build over time)
  3. Generic fallback: self-hosted Honcho < 3.x lacks peer cards

The hint also suggests alternative tools (honcho_reasoning / honcho_search)
so the model can route around the empty card rather than giving up.

Schema description updated so the model knows the hint field exists and
that an empty card is NOT an error state.

7 tests cover the hint paths: warmup, observation-disabled for user + ai,
generic fallback, populated card still returns plain result (no hint),
alternative-tool suggestion present.
This commit is contained in:
Erosika
2026-04-27 12:36:35 -04:00
committed by kshitij
parent 5883df5574
commit 894e0b935b
2 changed files with 147 additions and 2 deletions

View File

@@ -38,7 +38,10 @@ PROFILE_SCHEMA = {
"description": (
"Retrieve or update a peer card from Honcho — a curated list of key facts "
"about that peer (name, role, preferences, communication style, patterns). "
"Pass `card` to update; omit `card` to read."
"Pass `card` to update; omit `card` to read. If the card is empty, the "
"result includes a `hint` field explaining why (observation disabled, "
"fresh peer, dialectic layer still warming up, etc.) — this is NOT an "
"error. Peer cards accumulate over time from observed conversation."
),
"parameters": {
"type": "object",
@@ -1057,6 +1060,63 @@ class HonchoMemoryProvider(MemoryProvider):
return chunks
def _empty_profile_hint(self, peer: str) -> Dict[str, Any]:
    """Diagnostic payload for an empty peer card from honcho_profile.

    Rather than hand the model a bare "No profile facts available yet."
    (which it tends to relay to the user as a cryptic failure), attach a
    `hint` naming the plausible causes, roughly ordered by how common
    they are in practice:

      1. Observation is switched off for this peer in the config.
      2. The card simply has not accumulated yet (fresh peer; the
         dialectic layer only runs every N turns).
      3. Fallback: a self-hosted Honcho backend older than 3.x, which
         has no peer-card support.
    """
    config = self._config
    causes: List[str] = []

    # Cause 1: every observe flag for this peer is off, so no facts can
    # ever accumulate.
    if config is not None:
        flag_names = (
            ("user_observe_me", "user_observe_others")
            if peer == "user"
            else ("ai_observe_me", "ai_observe_others")
        )
        if not any(bool(getattr(config, name, True)) for name in flag_names):
            causes.append(
                f"observation is disabled for peer '{peer}' "
                "(user_observe_me/ai_observe_me in config)"
            )

    # Cause 2: the session is too young for the dialectic layer to have
    # produced anything yet (cadence fires every N turns).
    cadence = getattr(self, "_dialectic_cadence", 1)
    turn = getattr(self, "_turn_count", 0)
    if turn < max(2, cadence):
        causes.append(
            f"this session has only {turn} turn(s); peer cards accumulate "
            f"as the dialectic layer reasons over conversation history "
            f"(cadence every {cadence} turn(s))"
        )

    # Cause 3: nothing specific detected — fall back to the generic
    # explanation (warming up, or a backend without peer-card support).
    if not causes:
        causes.append(
            "peer card has no facts yet — Honcho's dialectic layer builds "
            "this over time from observed turns; self-hosted Honcho < 3.x "
            "does not support peer cards at all"
        )

    hint_text = (
        "This is not an error. "
        + "; ".join(causes)
        + ". Try honcho_reasoning for a synthesized answer, or "
        "honcho_search to query raw conversation excerpts."
    )
    return {"result": "No profile facts available yet.", "hint": hint_text}
def sync_turn(self, user_content: str, assistant_content: str, *, session_id: str = "") -> None:
"""Record the conversation turn in Honcho (non-blocking).
@@ -1169,7 +1229,7 @@ class HonchoMemoryProvider(MemoryProvider):
return json.dumps({"result": f"Peer card updated ({len(result)} facts).", "card": result})
card = self._manager.get_peer_card(self._session_key, peer=peer)
if not card:
return json.dumps({"result": "No profile facts available yet."})
return json.dumps(self._empty_profile_hint(peer))
return json.dumps({"result": card})
elif tool_name == "honcho_search":

View File

@@ -0,0 +1,85 @@
"""Tests for honcho_profile's empty-card hint (#5137 follow-up)."""
from __future__ import annotations
import json
from unittest.mock import MagicMock
from plugins.memory.honcho import HonchoMemoryProvider
def _make_provider(**cfg_overrides) -> HonchoMemoryProvider:
    """Build a HonchoMemoryProvider wired with mocks and an empty peer card."""
    prov = HonchoMemoryProvider()
    prov._manager = MagicMock()
    prov._manager.get_peer_card.return_value = []  # simulate an empty card
    prov._session_key = "agent:main:test"
    # Skip the lazy _ensure_session() initialization gate.
    prov._session_initialized = True
    prov._cron_skipped = False

    config = MagicMock()
    # Observation flags default to True, mirroring HonchoClientConfig defaults.
    for flag in (
        "user_observe_me",
        "user_observe_others",
        "ai_observe_me",
        "ai_observe_others",
    ):
        setattr(config, flag, cfg_overrides.get(flag, True))
    config.message_max_chars = 25000
    prov._config = config

    prov._dialectic_cadence = cfg_overrides.get("dialectic_cadence", 1)
    prov._turn_count = cfg_overrides.get("turn_count", 5)
    return prov
class TestEmptyProfileHint:
    """honcho_profile's empty-card diagnostic hint (#5137 follow-up)."""

    @staticmethod
    def _profile(provider, args=None):
        """Invoke honcho_profile on *provider* and decode the JSON payload."""
        return json.loads(provider.handle_tool_call("honcho_profile", args or {}))

    def test_returns_hint_not_bare_error_message(self):
        body = self._profile(_make_provider())
        assert body["result"] == "No profile facts available yet."
        assert "hint" in body
        assert "not an error" in body["hint"].lower()

    def test_hint_mentions_warmup_when_turn_count_below_cadence(self):
        body = self._profile(_make_provider(turn_count=1, dialectic_cadence=3))
        lowered = body["hint"].lower()
        assert "turn" in lowered
        assert "cadence" in lowered

    def test_hint_mentions_observation_when_fully_disabled_for_user(self):
        provider = _make_provider(user_observe_me=False, user_observe_others=False)
        body = self._profile(provider, {"peer": "user"})
        assert "observation is disabled" in body["hint"].lower()

    def test_hint_mentions_observation_when_fully_disabled_for_ai(self):
        provider = _make_provider(ai_observe_me=False, ai_observe_others=False)
        body = self._profile(provider, {"peer": "ai"})
        assert "observation is disabled" in body["hint"].lower()
        assert "ai" in body["hint"]

    def test_hint_falls_back_to_generic_reason_when_no_specific_cause(self):
        """Mature session with observation on + enough turns = generic hint."""
        body = self._profile(_make_provider(turn_count=50, dialectic_cadence=1))
        assert "hint" in body
        # Generic hint mentions self-hosted as a common cause
        lowered = body["hint"].lower()
        assert any(word in lowered for word in ("self-hosted", "dialectic"))

    def test_hint_suggests_alternative_tools(self):
        body = self._profile(_make_provider())
        # User-facing suggestion to try honcho_reasoning or honcho_search
        assert "honcho_reasoning" in body["hint"] or "honcho_search" in body["hint"]

    def test_populated_card_returns_card_without_hint(self):
        """Regression: a populated card should NOT trigger the hint path."""
        provider = _make_provider()
        provider._manager.get_peer_card.return_value = ["Fact 1", "Fact 2"]
        body = self._profile(provider)
        assert body["result"] == ["Fact 1", "Fact 2"]
        assert "hint" not in body