Merge pull request #15926 from NousResearch/bb/tui-long-session-perf

perf(tui): stabilize long-session scrolling
This commit is contained in:
brooklyn!
2026-04-26 23:10:08 -05:00
committed by GitHub
100 changed files with 6824 additions and 662 deletions

View File

@@ -145,6 +145,7 @@ def test_launch_tui_exports_model_and_provider(monkeypatch, main_mod):
assert env["HERMES_INFERENCE_MODEL"] == "nous/hermes-test"
assert env["HERMES_TUI_PROVIDER"] == "nous"
assert env["HERMES_INFERENCE_PROVIDER"] == "nous"
assert env["NODE_ENV"] == "production"
def test_print_tui_exit_summary_includes_resume_and_token_totals(monkeypatch, capsys):

View File

@@ -222,6 +222,35 @@ class TestMessageStorage:
assert conv[0] == {"role": "user", "content": "Hello"}
assert conv[1] == {"role": "assistant", "content": "Hi!"}
def test_get_messages_as_conversation_includes_ancestor_chain(self, db):
db.create_session("root", "tui")
db.append_message("root", role="user", content="first prompt")
db.append_message("root", role="assistant", content="first answer")
db.create_session("child", "tui", parent_session_id="root")
db.append_message("child", role="user", content="second prompt")
db.append_message("child", role="assistant", content="second answer")
conv = db.get_messages_as_conversation("child", include_ancestors=True)
assert [m["content"] for m in conv] == [
"first prompt",
"first answer",
"second prompt",
"second answer",
]
def test_get_messages_as_conversation_avoids_repeated_resume_prompts_from_ancestors(self, db):
db.create_session("root", "tui")
db.append_message("root", role="user", content="same prompt")
db.append_message("root", role="user", content="same prompt")
db.append_message("root", role="assistant", content="answer")
db.create_session("child", "tui", parent_session_id="root")
db.append_message("child", role="user", content="next prompt")
conv = db.get_messages_as_conversation("child", include_ancestors=True)
assert [m["content"] for m in conv if m["role"] == "user"] == ["same prompt", "next prompt"]
def test_finish_reason_stored(self, db):
db.create_session(session_id="s1", source="cli")
db.append_message("s1", role="assistant", content="Done", finish_reason="stop")

View File

@@ -59,6 +59,69 @@ def test_write_json_returns_false_on_broken_pipe(monkeypatch):
assert server.write_json({"ok": True}) is False
def test_history_to_messages_preserves_tool_calls_for_resume_display():
    """Tool-call turns survive conversion into the compact resume display form."""
    tool_call = {
        "id": "call_1",
        "function": {
            "name": "search_files",
            "arguments": json.dumps({"pattern": "resume"}),
        },
    }
    history = [
        {"role": "user", "content": "first prompt"},
        {"role": "assistant", "content": "", "tool_calls": [tool_call]},
        {"role": "tool", "content": "{}", "tool_call_id": "call_1"},
        {"role": "assistant", "content": "first answer"},
        {"role": "user", "content": "second prompt"},
    ]
    expected = [
        {"role": "user", "text": "first prompt"},
        {"context": "resume", "name": "search_files", "role": "tool"},
        {"role": "assistant", "text": "first answer"},
        {"role": "user", "text": "second prompt"},
    ]
    assert server._history_to_messages(history) == expected
def test_session_resume_uses_parent_lineage_for_display(monkeypatch):
    """Resuming a session renders history from the full parent lineage, not just the tip."""
    recorded = {}

    class FakeDB:
        def get_session(self, target):
            return {"id": target}

        def reopen_session(self, target):
            recorded["reopened"] = target

        def get_messages_as_conversation(self, target, include_ancestors=False):
            recorded.setdefault("history_calls", []).append((target, include_ancestors))
            if include_ancestors:
                return [
                    {"role": "user", "content": "root prompt"},
                    {"role": "assistant", "content": "root answer"},
                ]
            return [{"role": "user", "content": "tip prompt"}]

    monkeypatch.setattr(server, "_get_db", lambda: FakeDB())
    monkeypatch.setattr(server, "_enable_gateway_prompts", lambda: None)
    monkeypatch.setattr(server, "_set_session_context", lambda target: [])
    monkeypatch.setattr(server, "_clear_session_context", lambda tokens: None)
    monkeypatch.setattr(
        server, "_make_agent", lambda *args, **kwargs: types.SimpleNamespace(model="test")
    )
    monkeypatch.setattr(
        server, "_session_info", lambda agent: {"model": "test", "tools": {}, "skills": {}}
    )
    monkeypatch.setattr(
        server, "_init_session", lambda sid, key, agent, history, cols=80: None
    )
    response = server.handle_request(
        {"id": "1", "method": "session.resume", "params": {"session_id": "tip"}}
    )
    assert response["result"]["messages"] == [
        {"role": "user", "text": "root prompt"},
        {"role": "assistant", "text": "root answer"},
    ]
    assert recorded["history_calls"] == [("tip", False), ("tip", True)]
def test_status_callback_emits_kind_and_text():
with patch("tui_gateway.server._emit") as emit:
cb = server._agent_cbs("sid")["status_callback"]
@@ -347,6 +410,35 @@ def test_complete_slash_includes_provider_alias():
assert any(item["text"] == "provider" for item in resp["result"]["items"])
def test_complete_slash_includes_tui_details_command():
    """Typing the /det prefix offers /details as a slash-command completion."""
    resp = server.handle_request(
        {"id": "1", "method": "complete.slash", "params": {"text": "/det"}}
    )
    offered = [item["text"] for item in resp["result"]["items"]]
    assert "/details" in offered
def test_complete_slash_details_args():
    """/details completes its section and mode arguments stage by stage."""

    def complete(req_id, text):
        return server.handle_request(
            {"id": req_id, "method": "complete.slash", "params": {"text": text}}
        )

    def offered(resp):
        return {item["text"] for item in resp["result"]["items"]}

    resp_root = complete("0", "/details")
    resp_section = complete("1", "/details t")
    resp_mode = complete("2", "/details thinking e")

    assert resp_root["result"]["replace_from"] == len("/details")
    assert " thinking" in offered(resp_root)
    assert "thinking" in offered(resp_section)
    assert "expanded" in offered(resp_mode)
def test_config_set_reasoning_updates_live_session_and_agent(tmp_path, monkeypatch):
monkeypatch.setattr(server, "_hermes_home", tmp_path)
agent = types.SimpleNamespace(reasoning_config=None)