fix(tui): restore resumed transcript lineage

This commit is contained in:
Brooklyn Nicholson
2026-04-26 15:16:12 -05:00
parent 350ee1bf23
commit d4dde6b5f2
11 changed files with 537 additions and 49 deletions

View File

@@ -1132,20 +1132,29 @@ class SessionDB:
current = child_id
return session_id
def get_messages_as_conversation(self, session_id: str) -> List[Dict[str, Any]]:
def get_messages_as_conversation(
self, session_id: str, include_ancestors: bool = False
) -> List[Dict[str, Any]]:
"""
Load messages in the OpenAI conversation format (role + content dicts).
Used by the gateway to restore conversation history.
"""
session_ids = [session_id]
if include_ancestors:
session_ids = self._session_lineage_root_to_tip(session_id)
with self._lock:
cursor = self._conn.execute(
"SELECT role, content, tool_call_id, tool_calls, tool_name, "
"reasoning, reasoning_content, reasoning_details, codex_reasoning_items, "
"codex_message_items "
"FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(session_id,),
)
rows = cursor.fetchall()
rows = []
for sid in session_ids:
cursor = self._conn.execute(
"SELECT role, content, tool_call_id, tool_calls, tool_name, "
"reasoning, reasoning_content, reasoning_details, codex_reasoning_items, "
"codex_message_items "
"FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(sid,),
)
rows.extend(cursor.fetchall())
messages = []
for row in rows:
msg = {"role": row["role"], "content": row["content"]}
@@ -1185,9 +1194,47 @@ class SessionDB:
except (json.JSONDecodeError, TypeError):
logger.warning("Failed to deserialize codex_message_items, falling back to None")
msg["codex_message_items"] = None
if include_ancestors and self._is_duplicate_replayed_user_message(messages, msg):
continue
messages.append(msg)
return messages
def _session_lineage_root_to_tip(self, session_id: str) -> List[str]:
if not session_id:
return [session_id]
chain = []
current = session_id
seen = set()
with self._lock:
for _ in range(100):
if not current or current in seen:
break
seen.add(current)
chain.append(current)
row = self._conn.execute(
"SELECT parent_session_id FROM sessions WHERE id = ?",
(current,),
).fetchone()
if row is None:
break
current = row["parent_session_id"] if hasattr(row, "keys") else row[0]
return list(reversed(chain)) or [session_id]
@staticmethod
def _is_duplicate_replayed_user_message(messages: List[Dict[str, Any]], msg: Dict[str, Any]) -> bool:
if msg.get("role") != "user":
return False
content = msg.get("content")
if not isinstance(content, str) or not content:
return False
for prev in reversed(messages):
if prev.get("role") == "user" and prev.get("content") == content:
return True
if prev.get("role") == "assistant" and (prev.get("content") or prev.get("tool_calls")):
return False
return False
# =========================================================================
# Search
# =========================================================================

View File

@@ -222,6 +222,35 @@ class TestMessageStorage:
assert conv[0] == {"role": "user", "content": "Hello"}
assert conv[1] == {"role": "assistant", "content": "Hi!"}
def test_get_messages_as_conversation_includes_ancestor_chain(self, db):
    """Resuming with include_ancestors=True replays parent history first."""
    db.create_session("root", "tui")
    db.append_message("root", role="user", content="first prompt")
    db.append_message("root", role="assistant", content="first answer")
    db.create_session("child", "tui", parent_session_id="root")
    db.append_message("child", role="user", content="second prompt")
    db.append_message("child", role="assistant", content="second answer")

    conv = db.get_messages_as_conversation("child", include_ancestors=True)

    expected = ["first prompt", "first answer", "second prompt", "second answer"]
    assert [row["content"] for row in conv] == expected
def test_get_messages_as_conversation_avoids_repeated_resume_prompts_from_ancestors(self, db):
    """Back-to-back identical user prompts in an ancestor collapse to one."""
    db.create_session("root", "tui")
    for _ in range(2):
        db.append_message("root", role="user", content="same prompt")
    db.append_message("root", role="assistant", content="answer")
    db.create_session("child", "tui", parent_session_id="root")
    db.append_message("child", role="user", content="next prompt")

    conv = db.get_messages_as_conversation("child", include_ancestors=True)

    user_contents = [row["content"] for row in conv if row["role"] == "user"]
    assert user_contents == ["same prompt", "next prompt"]
def test_finish_reason_stored(self, db):
db.create_session(session_id="s1", source="cli")
db.append_message("s1", role="assistant", content="Done", finish_reason="stop")

View File

@@ -59,6 +59,69 @@ def test_write_json_returns_false_on_broken_pipe(monkeypatch):
assert server.write_json({"ok": True}) is False
def test_history_to_messages_preserves_tool_calls_for_resume_display():
    """Assistant turns carrying only tool calls survive as tool rows."""
    tool_call = {
        "id": "call_1",
        "function": {
            "name": "search_files",
            "arguments": json.dumps({"pattern": "resume"}),
        },
    }
    history = [
        {"role": "user", "content": "first prompt"},
        {"role": "assistant", "content": "", "tool_calls": [tool_call]},
        {"role": "tool", "content": "{}", "tool_call_id": "call_1"},
        {"role": "assistant", "content": "first answer"},
        {"role": "user", "content": "second prompt"},
    ]

    expected = [
        {"role": "user", "text": "first prompt"},
        {"context": "resume", "name": "search_files", "role": "tool"},
        {"role": "assistant", "text": "first answer"},
        {"role": "user", "text": "second prompt"},
    ]
    assert server._history_to_messages(history) == expected
def test_session_resume_uses_parent_lineage_for_display(monkeypatch):
    """session.resume renders the full ancestor transcript, not just the tip."""
    calls = {}

    class StubDB:
        def get_session(self, target):
            return {"id": target}

        def reopen_session(self, target):
            calls["reopened"] = target

        def get_messages_as_conversation(self, target, include_ancestors=False):
            calls.setdefault("history_calls", []).append((target, include_ancestors))
            if include_ancestors:
                return [
                    {"role": "user", "content": "root prompt"},
                    {"role": "assistant", "content": "root answer"},
                ]
            return [{"role": "user", "content": "tip prompt"}]

    # Stub out every collaborator so only the resume flow itself runs.
    monkeypatch.setattr(server, "_get_db", lambda: StubDB())
    monkeypatch.setattr(server, "_enable_gateway_prompts", lambda: None)
    monkeypatch.setattr(server, "_set_session_context", lambda target: [])
    monkeypatch.setattr(server, "_clear_session_context", lambda tokens: None)
    monkeypatch.setattr(server, "_make_agent", lambda *args, **kwargs: types.SimpleNamespace(model="test"))
    monkeypatch.setattr(server, "_session_info", lambda agent: {"model": "test", "tools": {}, "skills": {}})
    monkeypatch.setattr(server, "_init_session", lambda sid, key, agent, history, cols=80: None)

    resp = server.handle_request({"id": "1", "method": "session.resume", "params": {"session_id": "tip"}})

    assert resp["result"]["messages"] == [
        {"role": "user", "text": "root prompt"},
        {"role": "assistant", "text": "root answer"},
    ]
    # Tip-only history feeds the agent; lineage history feeds the display.
    assert calls["history_calls"] == [("tip", False), ("tip", True)]
def test_status_callback_emits_kind_and_text():
with patch("tui_gateway.server._emit") as emit:
cb = server._agent_cbs("sid")["status_callback"]

View File

@@ -913,8 +913,16 @@ def _probe_config_health(cfg: dict) -> str:
def _session_info(agent) -> dict:
reasoning_config = getattr(agent, "reasoning_config", None)
reasoning_effort = ""
if isinstance(reasoning_config, dict) and reasoning_config.get("enabled") is not False:
reasoning_effort = str(reasoning_config.get("effort", "") or "")
service_tier = getattr(agent, "service_tier", None) or ""
info: dict = {
"model": getattr(agent, "model", ""),
"reasoning_effort": reasoning_effort,
"service_tier": service_tier,
"fast": service_tier == "priority",
"tools": {},
"skills": {},
"cwd": os.getcwd(),
@@ -1013,7 +1021,7 @@ def _tool_summary(name: str, result: str, duration_s: float | None) -> str | Non
if n is not None:
text = f"Extracted {n} {'page' if n == 1 else 'pages'}"
return f"{text or 'Completed'}{suffix}" if (text or dur) else None
return f"{text}{suffix}" if text else None
def _on_tool_start(sid: str, tool_call_id: str, name: str, args: dict):
@@ -1029,10 +1037,13 @@ def _on_tool_start(sid: str, tool_call_id: str, name: str, args: dict):
pass
session.setdefault("tool_started_at", {})[tool_call_id] = time.time()
if _tool_progress_enabled(sid):
payload = {"tool_id": tool_call_id, "name": name, "context": _tool_ctx(name, args)}
if name == "todo" and isinstance(args, dict) and isinstance(args.get("todos"), list):
payload["todos"] = args.get("todos")
_emit(
"tool.start",
sid,
{"tool_id": tool_call_id, "name": name, "context": _tool_ctx(name, args)},
payload,
)
@@ -1050,6 +1061,13 @@ def _on_tool_complete(sid: str, tool_call_id: str, name: str, args: dict, result
summary = _tool_summary(name, result, duration_s)
if summary:
payload["summary"] = summary
if name == "todo":
try:
data = json.loads(result)
if isinstance(data, dict) and isinstance(data.get("todos"), list):
payload["todos"] = data.get("todos")
except Exception:
pass
try:
from agent.display import render_edit_diff_with_delta
@@ -1698,7 +1716,8 @@ def _(rid, params: dict) -> dict:
try:
db.reopen_session(target)
history = db.get_messages_as_conversation(target)
messages = _history_to_messages(history)
display_history = db.get_messages_as_conversation(target, include_ancestors=True)
messages = _history_to_messages(display_history)
tokens = _set_session_context(target)
try:
agent = _make_agent(sid, target, session_id=target)
@@ -1746,11 +1765,20 @@ def _(rid, params: dict) -> dict:
@method("session.history")
def _(rid, params: dict) -> dict:
session, err = _sess(params, rid)
return err or _ok(
if err:
return err
history = list(session.get("history", []))
db = _get_db()
if db is not None and session.get("session_key"):
try:
history = db.get_messages_as_conversation(session["session_key"], include_ancestors=True)
except Exception:
pass
return _ok(
rid,
{
"count": len(session.get("history", [])),
"messages": _history_to_messages(list(session.get("history", []))),
"messages": _history_to_messages(history),
},
)

View File

@@ -1,7 +1,26 @@
import { describe, expect, it } from 'vitest'
import { toTranscriptMessages } from '../domain/messages.js'
import { upsert } from '../lib/messages.js'
describe('toTranscriptMessages', () => {
  // Regression guard: resumed transcripts must keep assistant turns that only
  // carried tool calls, surfacing them as tool rows instead of dropping them.
  it('preserves assistant tool-call rows so resume does not drop prior turns', () => {
    const rows = [
      { role: 'user', text: 'first prompt' },
      { role: 'tool', context: 'repo', name: 'search_files', text: 'ignored raw result' },
      { role: 'assistant', text: 'first answer' },
      { role: 'user', text: 'second prompt' }
    ]
    expect(toTranscriptMessages(rows).map(msg => [msg.role, msg.text])).toEqual([
      ['user', 'first prompt'],
      ['assistant', 'first answer'],
      ['user', 'second prompt']
    ])
    // The tool row folds into the following assistant message's tool trail.
    expect(toTranscriptMessages(rows)[1]?.tools?.[0]).toContain('Search Files')
  })
})
describe('upsert', () => {
it('appends when last role differs', () => {
expect(upsert([{ role: 'user', text: 'hi' }], 'assistant', 'hello')).toHaveLength(2)

View File

@@ -1,14 +1,18 @@
import { describe, expect, it } from 'vitest'
import {
boundedLiveRenderText,
buildToolTrailLine,
edgePreview,
estimateRows,
estimateTokensRough,
fmtK,
isToolTrailResultLine,
lastCotTrailIndex,
parseToolTrailResultLine,
pasteTokenLabel,
sameToolTrailGroup
sameToolTrailGroup,
splitToolDuration
} from '../lib/text.js'
describe('isToolTrailResultLine', () => {
@@ -19,6 +23,16 @@ describe('isToolTrailResultLine', () => {
})
})
describe('buildToolTrailLine', () => {
  // The duration suffix is part of the call label itself, so parse/split
  // helpers must round-trip it cleanly.
  it('puts completion duration inline before the result marker', () => {
    const line = buildToolTrailLine('read_file', 'x', false, '', 0.94)
    expect(line).toBe('Read File("x") (0.9s) ✓')
    expect(parseToolTrailResultLine(line)).toEqual({ call: 'Read File("x") (0.9s)', detail: '', mark: '✓' })
    expect(splitToolDuration('Read File("x") (0.9s)')).toEqual({ label: 'Read File("x")', duration: ' (0.9s)' })
  })
})
describe('lastCotTrailIndex', () => {
it('finds last non-result line', () => {
expect(lastCotTrailIndex(['a ✓', 'thinking…'])).toBe(1)
@@ -68,6 +82,28 @@ describe('estimateTokensRough', () => {
})
})
describe('boundedLiveRenderText', () => {
  // The live stream view keeps only the tail once char/line budgets are
  // exceeded, always announcing how much was omitted.
  it('preserves short live text verbatim', () => {
    expect(boundedLiveRenderText('one\ntwo', { maxChars: 100, maxLines: 10 })).toBe('one\ntwo')
  })
  it('keeps the live tail by character budget', () => {
    const out = boundedLiveRenderText('abcdefghij', { maxChars: 4, maxLines: 10 })
    expect(out).toContain('ghij')
    expect(out).toContain('omitted')
    expect(out).not.toContain('abcdef')
  })
  it('keeps the live tail by line budget', () => {
    const out = boundedLiveRenderText(['a', 'b', 'c', 'd'].join('\n'), { maxChars: 100, maxLines: 2 })
    expect(out).toContain('c\nd')
    expect(out).toContain('omitted 2 lines')
    expect(out).not.toContain('a\nb')
  })
})
describe('edgePreview', () => {
it('keeps both ends for long text', () => {
expect(edgePreview('Vampire Bondage ropes slipped from her neck, still stained with blood', 8, 18)).toBe(

View File

@@ -2,18 +2,20 @@ import {
REASONING_PULSE_MS,
STREAM_BATCH_MS,
STREAM_IDLE_BATCH_MS,
STREAM_SCROLL_BATCH_MS,
STREAM_TYPING_BATCH_MS
} from '../config/timing.js'
import type { SessionInterruptResponse, SubagentEventPayload } from '../gatewayTypes.js'
import { hasReasoningTag, splitReasoning } from '../lib/reasoning.js'
import {
boundedLiveRenderText,
buildToolTrailLine,
estimateTokensRough,
isTransientTrailLine,
sameToolTrailGroup,
toolTrailLabel
} from '../lib/text.js'
import type { ActiveTool, ActivityItem, Msg, SubagentProgress } from '../types.js'
import type { ActiveTool, ActivityItem, Msg, SubagentProgress, TodoItem } from '../types.js'
import { resetFlowOverlays } from './overlayStore.js'
import { pushSnapshot } from './spawnHistoryStore.js'
@@ -40,7 +42,52 @@ const diffSegmentBody = (msg: Msg): null | string => {
const hasDetails = (msg: Msg): boolean => Boolean(msg.thinking || msg.tools?.length || msg.toolTokens)
const textSegments = (segments: Msg[]) => segments.filter(msg => msg.role === 'assistant' && msg.kind !== 'diff').map(msg => msg.text)
// True only for a pure tool-trail segment: no reasoning text, no body text,
// and at least one recorded tool line.
const isToolOnly = (msg: Msg | undefined) => {
  if (!msg || msg.kind !== 'trail') {
    return false
  }
  return Boolean(!msg.thinking?.trim() && !msg.text && msg.tools?.length)
}
// Collapse adjacent tool-only trail segments into one segment so a resumed
// or re-rendered transcript doesn't show a separate row per tool call.
const mergeSequentialToolOnly = (segments: Msg[]) => {
  const merged: Msg[] = []
  for (const segment of segments) {
    const last = merged.at(-1)
    if (isToolOnly(segment) && isToolOnly(last)) {
      // Fold this segment's tools into the previous tool-only segment.
      merged[merged.length - 1] = {
        ...last!,
        tools: [...(last!.tools ?? []), ...(segment.tools ?? [])]
      }
    } else {
      merged.push(segment)
    }
  }
  return merged
}
// Type guard narrowing an arbitrary payload value to a known todo status.
const isTodoStatus = (status: unknown): status is TodoItem['status'] => {
  switch (status) {
    case 'pending':
    case 'in_progress':
    case 'completed':
    case 'cancelled':
      return true
    default:
      return false
  }
}
// Normalize an untyped gateway payload into TodoItem[]. Returns null when the
// value is not an array at all, so callers can skip patching state entirely;
// malformed rows (bad status, blank id/content) are silently dropped.
const parseTodos = (value: unknown): null | TodoItem[] => {
  if (!Array.isArray(value)) {
    return null
  }
  const todos: TodoItem[] = []
  for (const item of value) {
    if (!item || typeof item !== 'object') {
      continue
    }
    const row = item as Record<string, unknown>
    if (!isTodoStatus(row.status)) {
      continue
    }
    const id = String(row.id ?? '').trim()
    const content = String(row.content ?? '').trim()
    if (id && content) {
      todos.push({ content, id, status: row.status })
    }
  }
  return todos
}
const textSegments = (segments: Msg[]) =>
segments.filter(msg => msg.role === 'assistant' && msg.kind !== 'diff').map(msg => msg.text)
const finalTail = (finalText: string, segments: Msg[]) => {
let tail = finalText
@@ -88,6 +135,7 @@ class TurnController {
turnTools: string[] = []
private activeTools: ActiveTool[] = []
private activeReasoningText = ''
private reasoningSegmentIndex: null | number = null
private activityId = 0
private reasoningStreamingTimer: Timer = null
@@ -100,12 +148,18 @@ class TurnController {
this.streamDelay = STREAM_TYPING_BATCH_MS
}
boostStreamingForScroll() {
this.streamDelay = Math.max(this.streamDelay, STREAM_SCROLL_BATCH_MS)
}
relaxStreaming() {
this.streamDelay = STREAM_IDLE_BATCH_MS
}
clearReasoning() {
this.reasoningTimer = clear(this.reasoningTimer)
this.activeReasoningText = ''
this.reasoningSegmentIndex = null
this.reasoningText = ''
this.toolTokenAcc = 0
patchTurnState({ reasoning: '', reasoningTokens: 0, toolTokens: 0 })
@@ -144,6 +198,8 @@ class TurnController {
this.interrupted = true
gw.request<SessionInterruptResponse>('session.interrupt', { session_id: sid }).catch(() => {})
this.closeReasoningSegment()
const segments = this.segmentMessages
const partial = this.bufRef.trimStart()
const tools = this.pendingSegmentTools
@@ -193,7 +249,7 @@ class TurnController {
}
private syncReasoningSegment() {
const thinking = this.reasoningText.trim()
const thinking = this.activeReasoningText.trim()
if (!thinking) {
return
@@ -205,8 +261,7 @@ class TurnController {
text: '',
thinking,
thinkingTokens: estimateTokensRough(thinking),
toolTokens: this.toolTokenAcc || undefined,
...(this.pendingSegmentTools.length && { tools: this.pendingSegmentTools })
toolTokens: this.toolTokenAcc || undefined
}
if (this.reasoningSegmentIndex === null) {
@@ -219,13 +274,40 @@ class TurnController {
patchTurnState({ streamSegments: this.segmentMessages })
}
// Finalize the in-flight reasoning segment: flush its accumulated text into
// the stream segments, then reset the accumulator and segment index so the
// next reasoning burst opens a fresh segment instead of appending here.
private closeReasoningSegment() {
  this.syncReasoningSegment()
  this.activeReasoningText = ''
  this.reasoningSegmentIndex = null
}
// Append a segment, merging consecutive tool-only trail segments so each
// run of tool calls renders as a single trail row.
private pushSegment(msg: Msg) {
  if (isToolOnly(msg) && isToolOnly(this.segmentMessages.at(-1)!)) {
    const prev = this.segmentMessages.at(-1)!
    // Immutable update: replace the last segment with a merged copy.
    this.segmentMessages = [
      ...this.segmentMessages.slice(0, -1),
      { ...prev, tools: [...(prev.tools ?? []), ...(msg.tools ?? [])] }
    ]
    return
  }
  this.segmentMessages = [...this.segmentMessages, msg]
}
flushStreamingSegment() {
const raw = this.bufRef.trimStart()
const split = raw ? (hasReasoningTag(raw) ? splitReasoning(raw) : { reasoning: '', text: raw }) : { reasoning: '', text: '' }
const split = raw
? hasReasoningTag(raw)
? splitReasoning(raw)
: { reasoning: '', text: raw }
: { reasoning: '', text: '' }
if (split.reasoning && !this.reasoningText.trim()) {
this.reasoningText = split.reasoning
this.activeReasoningText = split.reasoning
patchTurnState({ reasoning: this.reasoningText, reasoningTokens: estimateTokensRough(this.reasoningText) })
this.syncReasoningSegment()
}
const msg: Msg = {
@@ -238,7 +320,7 @@ class TurnController {
this.streamTimer = clear(this.streamTimer)
if (split.text || hasDetails(msg)) {
this.segmentMessages = [...this.segmentMessages, msg]
this.pushSegment(msg)
}
this.pendingSegmentTools = []
@@ -256,6 +338,31 @@ class TurnController {
}, REASONING_PULSE_MS)
}
// Accept a todo payload from a tool event; only patch turn state when the
// payload is actually an array (parseTodos returns null otherwise), so an
// absent or malformed field leaves the current todo list untouched.
recordTodos(value: unknown) {
  const todos = parseTodos(value)
  if (todos !== null) {
    patchTurnState({ todos })
  }
}
// If the last stream segment is tool-only, move the pending tool lines into
// it (keeping one contiguous trail) and publish the updated state.
// Returns true when a flush happened, false when there was nothing to do.
private flushPendingToolsIntoLastSegment() {
  const last = this.segmentMessages[this.segmentMessages.length - 1]
  if (!this.pendingSegmentTools.length || !isToolOnly(last)) {
    return false
  }
  this.segmentMessages = [
    ...this.segmentMessages.slice(0, -1),
    { ...last, tools: [...(last.tools ?? []), ...this.pendingSegmentTools] }
  ]
  this.pendingSegmentTools = []
  patchTurnState({ streamPendingTools: [], streamSegments: this.segmentMessages })
  return true
}
pushInlineDiffSegment(diffText: string, tools: string[] = []) {
// Strip CLI chrome the gateway emits before the unified diff (e.g. a
// leading "┊ review diff" header written by `_emit_inline_diff` for the
@@ -283,7 +390,10 @@ class TurnController {
return
}
this.segmentMessages = [...this.segmentMessages, { kind: 'diff', role: 'assistant', text: block, ...(tools.length && { tools }) }]
this.segmentMessages = [
...this.segmentMessages,
{ kind: 'diff', role: 'assistant', text: block, ...(tools.length && { tools }) }
]
patchTurnState({ streamSegments: this.segmentMessages })
}
@@ -328,13 +438,25 @@ class TurnController {
}
recordMessageComplete(payload: { rendered?: string; reasoning?: string; text?: string }) {
this.closeReasoningSegment()
const rawText = (payload.rendered ?? payload.text ?? this.bufRef).trimStart()
const split = splitReasoning(rawText)
const finalText = finalTail(split.text, this.segmentMessages)
const existingReasoning = this.reasoningText.trim() || String(payload.reasoning ?? '').trim()
const savedReasoning = [existingReasoning, existingReasoning ? '' : split.reasoning].filter(Boolean).join('\n\n')
const savedToolTokens = this.toolTokenAcc
const tools = this.pendingSegmentTools
let tools = this.pendingSegmentTools
const last = this.segmentMessages[this.segmentMessages.length - 1]
if (tools.length && isToolOnly(last)) {
this.segmentMessages = [
...this.segmentMessages.slice(0, -1),
{ ...last, tools: [...(last.tools ?? []), ...tools] }
]
this.pendingSegmentTools = []
tools = []
}
// Drop diff-only segments the agent is about to narrate in the final
// reply. Without this, a closing "here's the diff …" message would
@@ -343,13 +465,19 @@ class TurnController {
// assistant narration stays put.
const finalHasOwnDiffFence = /```(?:diff|patch)\b/i.test(finalText)
const segments = this.segmentMessages.filter(msg => {
const body = diffSegmentBody(msg)
const segments = mergeSequentialToolOnly(
this.segmentMessages.filter(msg => {
const body = diffSegmentBody(msg)
return body === null || (!finalHasOwnDiffFence && !finalText.includes(body))
})
return body === null || (!finalHasOwnDiffFence && !finalText.includes(body))
})
)
const hasReasoningSegment =
this.reasoningSegmentIndex !== null || segments.some(msg => Boolean(msg.thinking?.trim()))
const finalThinking = hasReasoningSegment ? '' : savedReasoning.trim()
const finalThinking = savedReasoning.trim()
const finalDetails: Msg = {
kind: 'trail',
role: 'system',
@@ -359,8 +487,8 @@ class TurnController {
toolTokens: savedToolTokens || undefined,
...(tools.length && { tools })
}
const hasReasoningSegment = this.reasoningSegmentIndex !== null
const finalMessages = hasDetails(finalDetails) && !hasReasoningSegment ? [...segments, finalDetails] : [...segments]
const finalMessages = hasDetails(finalDetails) ? [...segments, finalDetails] : [...segments]
if (finalText) {
finalMessages.push({ role: 'assistant', text: finalText })
@@ -387,6 +515,7 @@ class TurnController {
this.turnTools = []
this.persistedToolLabels.clear()
this.bufRef = ''
this.interrupted = false
patchTurnState({ activity: [], outcome: '' })
return { finalMessages, finalText, wasInterrupted }
@@ -419,6 +548,7 @@ class TurnController {
}
this.reasoningText = incoming
this.activeReasoningText = incoming
this.scheduleReasoning()
this.syncReasoningSegment()
this.pulseReasoningStreaming()
@@ -429,30 +559,63 @@ class TurnController {
return
}
if (!this.activeReasoningText.trim() && this.pendingSegmentTools.length) {
this.flushStreamingSegment()
}
this.reasoningText += text
this.activeReasoningText += text
if (this.reasoningText.length > 80_000) {
this.reasoningText = this.reasoningText.slice(-60_000)
}
this.scheduleReasoning()
this.syncReasoningSegment()
this.pulseReasoningStreaming()
}
recordToolComplete(toolId: string, fallbackName?: string, error?: string, summary?: string) {
const line = this.completeTool(toolId, fallbackName, error, summary)
recordToolComplete(
toolId: string,
fallbackName?: string,
error?: string,
summary?: string,
duration?: number,
todos?: unknown
) {
this.recordTodos(todos)
const line = this.completeTool(toolId, fallbackName, error, summary, duration)
this.pendingSegmentTools = [...this.pendingSegmentTools, line]
this.flushPendingToolsIntoLastSegment()
this.publishToolState()
}
recordInlineDiffToolComplete(diffText: string, toolId: string, fallbackName?: string, error?: string) {
recordInlineDiffToolComplete(
diffText: string,
toolId: string,
fallbackName?: string,
error?: string,
duration?: number
) {
this.flushStreamingSegment()
this.pushInlineDiffSegment(diffText, [this.completeTool(toolId, fallbackName, error, '')])
this.pushInlineDiffSegment(diffText, [this.completeTool(toolId, fallbackName, error, '', duration)])
this.publishToolState()
}
private completeTool(toolId: string, fallbackName?: string, error?: string, summary?: string) {
private completeTool(toolId: string, fallbackName?: string, error?: string, summary?: string, duration?: number) {
const done = this.activeTools.find(tool => tool.id === toolId)
const name = done?.name ?? fallbackName ?? 'tool'
const label = toolTrailLabel(name)
const line = buildToolTrailLine(name, done?.context || '', Boolean(error), error || summary || '')
const fallbackDuration = done?.startedAt ? (Date.now() - done.startedAt) / 1000 : undefined
const line = buildToolTrailLine(
name,
done?.context || '',
Boolean(error),
error || summary || '',
duration ?? fallbackDuration
)
this.activeTools = this.activeTools.filter(tool => tool.id !== toolId)
@@ -496,6 +659,7 @@ class TurnController {
recordToolStart(toolId: string, name: string, context: string) {
this.flushStreamingSegment()
this.closeReasoningSegment()
this.pruneTransient()
this.endReasoningPhase()
@@ -514,6 +678,7 @@ class TurnController {
this.bufRef = ''
this.interrupted = false
this.lastStatusNote = ''
this.activeReasoningText = ''
this.pendingSegmentTools = []
this.protocolWarned = false
this.reasoningSegmentIndex = null
@@ -552,7 +717,7 @@ class TurnController {
this.streamTimer = null
const raw = this.bufRef.trimStart()
const visible = hasReasoningTag(raw) ? splitReasoning(raw).text : raw
patchTurnState({ streaming: visible })
patchTurnState({ streaming: boundedLiveRenderText(visible) })
}, this.streamDelay)
}
@@ -560,6 +725,8 @@ class TurnController {
this.endReasoningPhase()
this.clearReasoning()
this.activeTools = []
this.activeReasoningText = ''
this.reasoningSegmentIndex = null
this.turnTools = []
this.toolTokenAcc = 0
this.persistedToolLabels.clear()

View File

@@ -5,9 +5,9 @@ import { LONG_MSG } from '../config/limits.js'
import { sectionMode } from '../domain/details.js'
import { userDisplay } from '../domain/messages.js'
import { ROLE } from '../domain/roles.js'
import { compactPreview, hasAnsi, isPasteBackedText, stripAnsi } from '../lib/text.js'
import { boundedLiveRenderText, compactPreview, hasAnsi, isPasteBackedText, stripAnsi } from '../lib/text.js'
import type { Theme } from '../theme.js'
import type { DetailsMode, Msg, SectionVisibility } from '../types.js'
import type { ActiveTool, DetailsMode, Msg, SectionVisibility } from '../types.js'
import { Md } from './markdown.js'
import { ToolTrail } from './thinking.js'
@@ -20,7 +20,8 @@ export const MessageLine = memo(function MessageLine({
isStreaming = false,
msg,
sections,
t
t,
tools = []
}: MessageLineProps) {
// Per-section overrides win over the global mode, so resolve each section
// we might consume here once and gate visibility on the *content-bearing*
@@ -34,7 +35,7 @@ export const MessageLine = memo(function MessageLine({
const activityMode = sectionMode('activity', detailsMode, sections, detailsModeCommandOverride)
const thinking = msg.thinking?.trim() ?? ''
if (msg.kind === 'trail' && (msg.tools?.length || thinking)) {
if (msg.kind === 'trail' && (msg.tools?.length || tools.length || thinking)) {
return thinkingMode !== 'hidden' || toolsMode !== 'hidden' || activityMode !== 'hidden' ? (
<Box flexDirection="column">
<ToolTrail
@@ -44,6 +45,7 @@ export const MessageLine = memo(function MessageLine({
reasoningTokens={msg.thinkingTokens}
sections={sections}
t={t}
tools={tools}
toolTokens={msg.toolTokens}
trail={msg.tools ?? []}
/>
@@ -86,7 +88,11 @@ export const MessageLine = memo(function MessageLine({
}
if (msg.role === 'assistant') {
return isStreaming ? <Text color={body}>{msg.text}</Text> : <Md compact={compact} t={t} text={msg.text} />
return isStreaming ? (
<Text color={body}>{boundedLiveRenderText(msg.text)}</Text>
) : (
<Md compact={compact} t={t} text={msg.text} />
)
}
if (msg.role === 'user' && msg.text.length > LONG_MSG && isPasteBackedText(msg.text)) {
@@ -154,4 +160,5 @@ interface MessageLineProps {
msg: Msg
sections?: SectionVisibility
t: Theme
tools?: ActiveTool[]
}

View File

@@ -16,12 +16,14 @@ import {
widthByDepth
} from '../lib/subagentTree.js'
import {
boundedLiveRenderText,
compactPreview,
estimateTokensRough,
fmtK,
formatToolCall,
parseToolTrailResultLine,
pick,
splitToolDuration,
thinkingPreview,
toolTrailLabel
} from '../lib/text.js'
@@ -633,7 +635,12 @@ export const Thinking = memo(function Thinking({
streaming?: boolean
t: Theme
}) {
const preview = useMemo(() => thinkingPreview(reasoning, mode, THINKING_COT_MAX), [mode, reasoning])
const preview = useMemo(() => {
const raw = thinkingPreview(reasoning, mode, THINKING_COT_MAX)
return mode === 'full' ? boundedLiveRenderText(raw) : raw
}, [mode, reasoning])
const lines = useMemo(() => preview.split('\n').map(line => line.replace(/\t/g, ' ')), [preview])
if (!preview && !active) {
@@ -790,7 +797,7 @@ export const ToolTrail = memo(function ToolTrail({
if (parsed) {
groups.push({
color: parsed.mark === '✗' ? t.color.error : t.color.cornsilk,
content: parsed.detail ? parsed.call : `${parsed.call} ${parsed.mark}`,
content: parsed.call,
details: [],
key: `tr-${i}`,
label: parsed.call
@@ -886,6 +893,21 @@ export const ToolTrail = memo(function ToolTrail({
const delegateGroups = groups.filter(g => g.label.startsWith('Delegate Task'))
const inlineDelegateKey = hasSubagents && delegateGroups.length === 1 ? delegateGroups[0]!.key : null
const toolLabel = (group: Group) => {
const { duration, label } = splitToolDuration(String(group.content))
return duration ? (
<>
{label}
<Text color={t.color.dim} dim>
{duration}
</Text>
</>
) : (
group.content
)
}
// ── Backstop: floating alerts when every panel is hidden ─────────
//
// Per-section overrides win over the global details_mode (they're computed
@@ -1051,7 +1073,7 @@ export const ToolTrail = memo(function ToolTrail({
content={
<>
<Text color={t.color.amber}> </Text>
{group.content}
{toolLabel(group)}
</>
}
rails={rails}

View File

@@ -1,4 +1,6 @@
export const LARGE_PASTE = { chars: 8000, lines: 80 }
export const LIVE_RENDER_MAX_CHARS = 16_000
export const LIVE_RENDER_MAX_LINES = 240
export const LONG_MSG = 300
export const MAX_HISTORY = 800
export const THINKING_COT_MAX = 160

View File

@@ -1,4 +1,4 @@
import { THINKING_COT_MAX } from '../config/limits.js'
import { LIVE_RENDER_MAX_CHARS, LIVE_RENDER_MAX_LINES, THINKING_COT_MAX } from '../config/limits.js'
import { VERBS } from '../content/verbs.js'
import type { ThinkingMode } from '../types.js'
@@ -88,6 +88,61 @@ export const thinkingPreview = (reasoning: string, mode: ThinkingMode, max: numb
return !raw || mode === 'collapsed' ? '' : mode === 'full' ? raw : compactPreview(raw.replace(WS_RE, ' '), max)
}
// Bound live-streaming text to a renderable tail so huge outputs don't stall
// the terminal: when `text` exceeds the char or line budget, keep only the
// trailing window and prepend an "[omitted …]" label.
export const boundedLiveRenderText = (
  text: string,
  { maxChars = LIVE_RENDER_MAX_CHARS, maxLines = LIVE_RENDER_MAX_LINES } = {}
) => {
  // Fast path: within both budgets, return verbatim. split('\n', maxLines + 1)
  // caps the scan so we never materialize every line of a giant buffer.
  if (text.length <= maxChars && text.split('\n', maxLines + 1).length <= maxLines) {
    return text
  }
  // Walk newlines backwards to find where the last `maxLines` lines begin.
  let start = 0
  let idx = text.length
  for (let seen = 0; seen < maxLines && idx > 0; seen++) {
    idx = text.lastIndexOf('\n', idx - 1)
    start = idx < 0 ? 0 : idx + 1
    if (idx < 0) {
      break
    }
  }
  // Tighten to the char budget, then snap forward to the next line boundary
  // so the tail doesn't begin mid-line.
  const lineStart = start
  start = Math.max(lineStart, text.length - maxChars)
  if (start > lineStart) {
    const nextBreak = text.indexOf('\n', start)
    if (nextBreak >= 0 && nextBreak < text.length - 1) {
      start = nextBreak + 1
    }
  }
  const tail = text.slice(start).trimStart()
  const omittedLines = countNewlines(text, start)
  // NOTE(review): omittedChars also counts whitespace trimmed off the tail's
  // start, so it can slightly exceed the pre-tail span — confirm intended.
  const omittedChars = Math.max(0, text.length - tail.length)
  const label =
    omittedLines > 0
      ? `[showing live tail; omitted ${fmtK(omittedLines)} lines / ${fmtK(omittedChars)} chars]\n`
      : `[showing live tail; omitted ${fmtK(omittedChars)} chars]\n`
  return `${label}${tail}`
}
// Count '\n' occurrences in text[0, end) without allocating substrings.
const countNewlines = (text: string, end: number) => {
  let total = 0
  let at = text.indexOf('\n')
  while (at >= 0 && at < end) {
    total++
    at = text.indexOf('\n', at + 1)
  }
  return total
}
export const stripTrailingPasteNewlines = (text: string) => (/[^\n]/.test(text) ? text.replace(/\n+$/, '') : text)
export const toolTrailLabel = (name: string) =>
@@ -104,10 +159,17 @@ export const formatToolCall = (name: string, context = '') => {
return preview ? `${label}("${preview}")` : label
}
export const buildToolTrailLine = (name: string, context: string, error?: boolean, note?: string) => {
export const buildToolTrailLine = (
name: string,
context: string,
error?: boolean,
note?: string,
duration?: number
) => {
const detail = compactPreview(note ?? '', 72)
const took = duration !== undefined ? ` (${duration.toFixed(1)}s)` : ''
return `${formatToolCall(name, context)}${detail ? ` :: ${detail}` : ''} ${error ? ' ✗' : ' ✓'}`
return `${formatToolCall(name, context)}${took}${detail ? ` :: ${detail}` : ''} ${error ? '✗' : '✓'}`
}
export const isToolTrailResultLine = (line: string) => line.endsWith(' ✓') || line.endsWith(' ✗')
@@ -134,6 +196,12 @@ export const parseToolTrailResultLine = (line: string) => {
return { call: body, detail: '', mark }
}
// Split a tool call line of the form `Label (1.2s)` into { label, duration };
// duration is '' (and label is the whole string) when no trailing duration
// marker is present. The duration keeps its leading space for re-joining.
export const splitToolDuration = (call: string) => {
  const match = /^(.*?)( \(\d+(?:\.\d)?s\))$/.exec(call)
  if (!match) {
    return { label: call, duration: '' }
  }
  return { label: match[1]!, duration: match[2]! }
}
export const isTransientTrailLine = (line: string) => line.startsWith('drafting ') || line === 'analyzing tool output…'
export const sameToolTrailGroup = (label: string, entry: string) =>