Compare commits

...

4 Commits

Author SHA1 Message Date
Teknium
8c5c598a58 fix(tests): update tool_progress_callback test calls to new 4-arg signature
Follow-up to sroecker's PR #5918 — test mocks were using the old 3-arg
callback signature (name, preview, args) instead of the new
(event_type, name, preview, args, **kwargs).
2026-04-07 17:44:04 -07:00
pradeep7127
66d475eabe fix(api_server): preserve conversation history when /v1/runs input is a message array
When /v1/runs receives an OpenAI-style array of messages as input, all
messages except the last user turn are now extracted as conversation_history.
Previously, only the last message was kept, silently discarding earlier
context in multi-turn conversations.

Handles multi-part content blocks by flattening text portions. Only fires
when no explicit conversation_history was provided.

Based on PR #5837 by pradeep7127.
2026-04-07 17:41:34 -07:00
VanBladee
0c96b54ef8 feat(api): accept conversation_history in request body
Allow clients to pass explicit conversation_history in /v1/responses and
/v1/runs request bodies instead of relying on server-side response chaining
via previous_response_id. Solves problems with stateless deployments where
the in-memory ResponseStore is lost on restart.

Adds input validation (must be array of {role, content} objects) and clear
precedence: explicit conversation_history > previous_response_id.

Based on PR #5805 by VanBladee, with added input validation.
2026-04-07 17:41:05 -07:00
Steffen Röcker
77d432a545 fix(api_server): update tool_progress_callback signature for Open WebUI streaming
Commit cc2b56b2 changed the tool_progress_callback signature from
(name, preview, args) to (event_type, name, preview, args, **kwargs)
but the API server's chat completion streaming callback was not updated.

This caused tool calls not to display in Open WebUI because the
callback received its arguments in the wrong positions.

- Update _on_tool_progress to use new 4-arg signature
- Add event_type filter to only show tool.started events
- Add **kwargs for optional duration/is_error parameters
2026-04-07 17:36:34 -07:00
2 changed files with 64 additions and 7 deletions

View File

@@ -564,8 +564,10 @@ class APIServerAdapter(BasePlatformAdapter):
if delta is not None:
_stream_q.put(delta)
def _on_tool_progress(name, preview, args):
def _on_tool_progress(event_type, name, preview, args, **kwargs):
"""Inject tool progress into the SSE stream for Open WebUI."""
if event_type != "tool.started":
return # Only show tool start events in chat stream
if name.startswith("_"):
return # Skip internal events (_thinking)
from agent.display import get_tool_emoji
@@ -816,9 +818,29 @@ class APIServerAdapter(BasePlatformAdapter):
else:
return web.json_response(_openai_error("'input' must be a string or array"), status=400)
# Reconstruct conversation history from previous_response_id
# Accept explicit conversation_history from the request body.
# This lets stateless clients supply their own history instead of
# relying on server-side response chaining via previous_response_id.
# Precedence: explicit conversation_history > previous_response_id.
conversation_history: List[Dict[str, str]] = []
if previous_response_id:
raw_history = body.get("conversation_history")
if raw_history:
if not isinstance(raw_history, list):
return web.json_response(
_openai_error("'conversation_history' must be an array of message objects"),
status=400,
)
for i, entry in enumerate(raw_history):
if not isinstance(entry, dict) or "role" not in entry or "content" not in entry:
return web.json_response(
_openai_error(f"conversation_history[{i}] must have 'role' and 'content' fields"),
status=400,
)
conversation_history.append({"role": str(entry["role"]), "content": str(entry["content"])})
if previous_response_id:
logger.debug("Both conversation_history and previous_response_id provided; using conversation_history")
if not conversation_history and previous_response_id:
stored = self._response_store.get(previous_response_id)
if stored is None:
return web.json_response(_openai_error(f"Previous response not found: {previous_response_id}"), status=404)
@@ -1404,14 +1426,49 @@ class APIServerAdapter(BasePlatformAdapter):
instructions = body.get("instructions")
previous_response_id = body.get("previous_response_id")
# Accept explicit conversation_history from the request body.
# Precedence: explicit conversation_history > previous_response_id.
conversation_history: List[Dict[str, str]] = []
if previous_response_id:
raw_history = body.get("conversation_history")
if raw_history:
if not isinstance(raw_history, list):
return web.json_response(
_openai_error("'conversation_history' must be an array of message objects"),
status=400,
)
for i, entry in enumerate(raw_history):
if not isinstance(entry, dict) or "role" not in entry or "content" not in entry:
return web.json_response(
_openai_error(f"conversation_history[{i}] must have 'role' and 'content' fields"),
status=400,
)
conversation_history.append({"role": str(entry["role"]), "content": str(entry["content"])})
if previous_response_id:
logger.debug("Both conversation_history and previous_response_id provided; using conversation_history")
if not conversation_history and previous_response_id:
stored = self._response_store.get(previous_response_id)
if stored:
conversation_history = list(stored.get("conversation_history", []))
if instructions is None:
instructions = stored.get("instructions")
# When input is a multi-message array, extract all but the last
# message as conversation history (the last becomes user_message).
# Only fires when no explicit history was provided.
if not conversation_history and isinstance(raw_input, list) and len(raw_input) > 1:
for msg in raw_input[:-1]:
if isinstance(msg, dict) and msg.get("role") and msg.get("content"):
content = msg["content"]
if isinstance(content, list):
# Flatten multi-part content blocks to text
content = " ".join(
part.get("text", "") for part in content
if isinstance(part, dict) and part.get("type") == "text"
)
conversation_history.append({"role": msg["role"], "content": str(content)})
session_id = body.get("session_id") or run_id
ephemeral_system_prompt = instructions

View File

@@ -439,7 +439,7 @@ class TestChatCompletionsEndpoint:
tp_cb = kwargs.get("tool_progress_callback")
# Simulate tool progress before streaming content
if tp_cb:
tp_cb("terminal", "ls -la", {"command": "ls -la"})
tp_cb("tool.started", "terminal", "ls -la", {"command": "ls -la"})
if cb:
await asyncio.sleep(0.05)
cb("Here are the files.")
@@ -476,8 +476,8 @@ class TestChatCompletionsEndpoint:
cb = kwargs.get("stream_delta_callback")
tp_cb = kwargs.get("tool_progress_callback")
if tp_cb:
tp_cb("_thinking", "some internal state", {})
tp_cb("web_search", "Python docs", {"query": "Python docs"})
tp_cb("tool.started", "_thinking", "some internal state", {})
tp_cb("tool.started", "web_search", "Python docs", {"query": "Python docs"})
if cb:
await asyncio.sleep(0.05)
cb("Found it.")