Merge remote-tracking branch 'origin/main' into hermes/hermes-6bb9911e

This commit is contained in:
teknium1
2026-03-17 02:54:29 -07:00
25 changed files with 1206 additions and 282 deletions

View File

@@ -39,6 +39,7 @@ custom OpenAI-compatible endpoint without touching the main model settings.
import json
import logging
import os
import threading
from pathlib import Path
from types import SimpleNamespace
from typing import Any, Dict, List, Optional, Tuple
@@ -1171,6 +1172,7 @@ def auxiliary_max_tokens_param(value: int) -> dict:
# Client cache: (provider, async_mode, base_url, api_key) -> (client, default_model)
_client_cache: Dict[tuple, tuple] = {}
_client_cache_lock = threading.Lock()
def _get_cached_client(
@@ -1182,9 +1184,11 @@ def _get_cached_client(
) -> Tuple[Optional[Any], Optional[str]]:
"""Get or create a cached client for the given provider."""
cache_key = (provider, async_mode, base_url or "", api_key or "")
if cache_key in _client_cache:
cached_client, cached_default = _client_cache[cache_key]
return cached_client, model or cached_default
with _client_cache_lock:
if cache_key in _client_cache:
cached_client, cached_default = _client_cache[cache_key]
return cached_client, model or cached_default
# Build outside the lock
client, default_model = resolve_provider_client(
provider,
model,
@@ -1193,7 +1197,11 @@ def _get_cached_client(
explicit_api_key=api_key,
)
if client is not None:
_client_cache[cache_key] = (client, default_model)
with _client_cache_lock:
if cache_key not in _client_cache:
_client_cache[cache_key] = (client, default_model)
else:
client, default_model = _client_cache[cache_key]
return client, model or default_model

View File

@@ -116,6 +116,14 @@ DEFAULT_CONTEXT_LENGTHS = {
"kimi-k2": 262144,
"qwen3-coder": 32768,
"big-pickle": 128000,
# Alibaba Cloud / DashScope Qwen models
"qwen3.5-plus": 131072,
"qwen3-max": 131072,
"qwen3-coder-plus": 131072,
"qwen3-coder-next": 131072,
"qwen-plus-latest": 131072,
"qwen3.5-flash": 131072,
"qwen-vl-max": 32768,
}

12
cli.py
View File

@@ -214,6 +214,7 @@ def load_cli_config() -> Dict[str, Any]:
"streaming": False,
"show_cost": False,
"skin": "default",
"theme_mode": "auto",
},
"clarify": {
"timeout": 120, # Seconds to wait for a clarify answer before auto-proceeding
@@ -3651,8 +3652,17 @@ class HermesCLI:
self.console.print(f"[bold red]Quick command error: {e}[/]")
else:
self.console.print(f"[bold red]Quick command '{base_cmd}' has no command defined[/]")
elif qcmd.get("type") == "alias":
target = qcmd.get("target", "").strip()
if target:
target = target if target.startswith("/") else f"/{target}"
user_args = cmd_original[len(base_cmd):].strip()
aliased_command = f"{target} {user_args}".strip()
return self.process_command(aliased_command)
else:
self.console.print(f"[bold red]Quick command '{base_cmd}' has no target defined[/]")
else:
self.console.print(f"[bold red]Quick command '{base_cmd}' has unsupported type (only 'exec' is supported)[/]")
self.console.print(f"[bold red]Quick command '{base_cmd}' has unsupported type (supported: 'exec', 'alias')[/]")
# Check for skill slash commands (/gif-search, /axolotl, etc.)
elif base_cmd in _skill_commands:
user_instruction = cmd_original[len(base_cmd):].strip()

View File

@@ -118,6 +118,11 @@ class TelegramAdapter(BasePlatformAdapter):
self._pending_photo_batch_tasks: Dict[str, asyncio.Task] = {}
self._media_group_events: Dict[str, MessageEvent] = {}
self._media_group_tasks: Dict[str, asyncio.Task] = {}
# Buffer rapid text messages so Telegram client-side splits of long
# messages are aggregated into a single MessageEvent.
self._text_batch_delay_seconds = float(os.getenv("HERMES_TELEGRAM_TEXT_BATCH_DELAY_SECONDS", "0.6"))
self._pending_text_batches: Dict[str, MessageEvent] = {}
self._pending_text_batch_tasks: Dict[str, asyncio.Task] = {}
self._token_lock_identity: Optional[str] = None
self._polling_error_task: Optional[asyncio.Task] = None
@@ -795,12 +800,17 @@ class TelegramAdapter(BasePlatformAdapter):
return text
async def _handle_text_message(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Handle incoming text messages."""
"""Handle incoming text messages.
Telegram clients split long messages into multiple updates. Buffer
rapid successive text messages from the same user/chat and aggregate
them into a single MessageEvent before dispatching.
"""
if not update.message or not update.message.text:
return
event = self._build_message_event(update.message, MessageType.TEXT)
await self.handle_message(event)
self._enqueue_text_event(event)
async def _handle_command(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Handle incoming command messages."""
@@ -845,6 +855,68 @@ class TelegramAdapter(BasePlatformAdapter):
event.text = "\n".join(parts)
await self.handle_message(event)
# ------------------------------------------------------------------
# Text message aggregation (handles Telegram client-side splits)
# ------------------------------------------------------------------
def _text_batch_key(self, event: MessageEvent) -> str:
    """Return the session-scoped key used to batch text messages.

    Chunks of one split message must land in the same bucket, so the key
    is derived from the event source the same way session keys are built.
    """
    from gateway.session import build_session_key

    per_user = self.config.extra.get("group_sessions_per_user", True)
    return build_session_key(event.source, group_sessions_per_user=per_user)
def _enqueue_text_event(self, event: MessageEvent) -> None:
    """Buffer a text event and reset the flush timer.

    When Telegram splits a long user message into multiple updates,
    they arrive within a few hundred milliseconds. This method
    concatenates them and waits for a short quiet period before
    dispatching the combined message.
    """
    key = self._text_batch_key(event)
    existing = self._pending_text_batches.get(key)
    if existing is None:
        # First chunk for this session: it becomes the aggregate event.
        self._pending_text_batches[key] = event
    else:
        # Append text from the follow-up chunk
        if event.text:
            existing.text = f"{existing.text}\n{event.text}" if existing.text else event.text
        # Merge any media that might be attached
        # NOTE(review): assumes media_types stays parallel to media_urls —
        # confirm against the MessageEvent definition.
        if event.media_urls:
            existing.media_urls.extend(event.media_urls)
            existing.media_types.extend(event.media_types)
    # Cancel any pending flush and restart the timer
    # (debounce: each new chunk pushes the quiet-period deadline forward).
    prior_task = self._pending_text_batch_tasks.get(key)
    if prior_task and not prior_task.done():
        prior_task.cancel()
    self._pending_text_batch_tasks[key] = asyncio.create_task(
        self._flush_text_batch(key)
    )
async def _flush_text_batch(self, key: str) -> None:
    """Wait for the quiet period then dispatch the aggregated text.

    If another chunk arrives while we sleep, _enqueue_text_event cancels
    this task; the CancelledError propagates out of asyncio.sleep and only
    the finally-block cleanup below runs.
    """
    current_task = asyncio.current_task()
    try:
        await asyncio.sleep(self._text_batch_delay_seconds)
        event = self._pending_text_batches.pop(key, None)
        if not event:
            return
        logger.info(
            "[Telegram] Flushing text batch %s (%d chars)",
            key, len(event.text or ""),
        )
        await self.handle_message(event)
    finally:
        # Only remove the registry entry if it still points at *this* task;
        # a replacement task may already have been installed for the key.
        if self._pending_text_batch_tasks.get(key) is current_task:
            self._pending_text_batch_tasks.pop(key, None)
# ------------------------------------------------------------------
# Photo batching
# ------------------------------------------------------------------
def _photo_batch_key(self, event: MessageEvent, msg: Message) -> str:
"""Return a batching key for Telegram photos/albums."""
from gateway.session import build_session_key

View File

@@ -1421,8 +1421,19 @@ class GatewayRunner:
return f"Quick command error: {e}"
else:
return f"Quick command '/{command}' has no command defined."
elif qcmd.get("type") == "alias":
target = qcmd.get("target", "").strip()
if target:
target = target if target.startswith("/") else f"/{target}"
target_command = target.lstrip("/")
user_args = event.get_command_args().strip()
event.text = f"{target} {user_args}".strip()
command = target_command
# Fall through to normal command dispatch below
else:
return f"Quick command '/{command}' has no target defined."
else:
return f"Quick command '/{command}' has unsupported type (only 'exec' is supported)."
return f"Quick command '/{command}' has unsupported type (supported: 'exec', 'alias')."
# Skill slash commands: /skill-name loads the skill and sends to agent
if command:

View File

@@ -139,6 +139,14 @@ PROVIDER_REGISTRY: Dict[str, ProviderConfig] = {
inference_base_url="https://api.anthropic.com",
api_key_env_vars=("ANTHROPIC_API_KEY", "ANTHROPIC_TOKEN", "CLAUDE_CODE_OAUTH_TOKEN"),
),
"alibaba": ProviderConfig(
id="alibaba",
name="Alibaba Cloud (DashScope)",
auth_type="api_key",
inference_base_url="https://dashscope-intl.aliyuncs.com/apps/anthropic",
api_key_env_vars=("DASHSCOPE_API_KEY",),
base_url_env_var="DASHSCOPE_BASE_URL",
),
"minimax-cn": ProviderConfig(
id="minimax-cn",
name="MiniMax (China)",

View File

@@ -1,5 +1,6 @@
"""Shared ANSI color utilities for Hermes CLI modules."""
import os
import sys
@@ -20,3 +21,123 @@ def color(text: str, *codes) -> str:
if not sys.stdout.isatty():
return text
return "".join(codes) + text + Colors.RESET
# =============================================================================
# Terminal background detection (light vs dark)
# =============================================================================
def _detect_via_colorfgbg() -> str:
    """Check the COLORFGBG environment variable.

    Some terminals (rxvt, xterm, iTerm2) set COLORFGBG to ``<fg>;<bg>``
    where bg >= 8 usually means a dark background.

    Returns "light", "dark", or "unknown".
    """
    raw = os.environ.get("COLORFGBG", "")
    if not raw:
        return "unknown"
    # The background index is the last semicolon-separated field.
    try:
        background = int(raw.split(";")[-1])
    except (ValueError, IndexError):
        # Non-numeric values such as "default;default" are inconclusive.
        return "unknown"
    # Standard terminal colors 0-6 are dark, 7+ are light.
    return "light" if background >= 7 else "dark"
def _detect_via_macos_appearance() -> str:
    """Check macOS AppleInterfaceStyle via ``defaults read``.

    Returns "light", "dark", or "unknown".
    """
    if sys.platform != "darwin":
        return "unknown"
    try:
        import subprocess

        proc = subprocess.run(
            ["defaults", "read", "-g", "AppleInterfaceStyle"],
            capture_output=True,
            text=True,
            timeout=2,
        )
        # The key only exists when dark mode is enabled; when the read
        # fails (key missing), macOS is in light mode.
        if proc.returncode == 0 and "dark" in proc.stdout.lower():
            return "dark"
        return "light"
    except Exception:
        return "unknown"
def _detect_via_osc11() -> str:
    """Query the terminal background colour via the OSC 11 escape sequence.

    Writes ``\\e]11;?\\a`` and reads the response to determine luminance.
    Only works when stdin/stdout are connected to a real TTY (not piped).

    Returns "light", "dark", or "unknown".
    """
    if sys.platform == "win32":
        # termios/tty are POSIX-only.
        return "unknown"
    if not (sys.stdin.isatty() and sys.stdout.isatty()):
        return "unknown"
    try:
        import select
        import termios
        import tty

        fd = sys.stdin.fileno()
        # Save terminal attributes so they can be restored after raw mode.
        old_attrs = termios.tcgetattr(fd)
        try:
            # Raw mode: the terminal's reply must not be echoed or line-buffered.
            tty.setraw(fd)
            # Send OSC 11 query
            sys.stdout.write("\x1b]11;?\x07")
            sys.stdout.flush()
            # Wait briefly for response
            if not select.select([fd], [], [], 0.1)[0]:
                return "unknown"
            # Drain the reply until the terminal goes quiet for 50ms.
            response = b""
            while select.select([fd], [], [], 0.05)[0]:
                response += os.read(fd, 128)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_attrs)
        # Parse response: \x1b]11;rgb:RRRR/GGGG/BBBB\x07 (or \x1b\\)
        text = response.decode("latin-1", errors="replace")
        if "rgb:" not in text:
            return "unknown"
        # Strip everything after the BEL or ESC terminator.
        rgb_part = text.split("rgb:")[-1].split("\x07")[0].split("\x1b")[0]
        channels = rgb_part.split("/")
        if len(channels) < 3:
            return "unknown"
        # Each channel is 2 or 4 hex digits; normalise to 0-255
        vals = []
        for ch in channels[:3]:
            ch = ch.strip()
            if len(ch) <= 2:
                vals.append(int(ch, 16))
            else:
                vals.append(int(ch[:2], 16))  # take high byte
        # Perceived luminance (ITU-R BT.601)
        luminance = 0.299 * vals[0] + 0.587 * vals[1] + 0.114 * vals[2]
        return "light" if luminance > 128 else "dark"
    except Exception:
        # Any failure (no termios, read error, malformed reply) is inconclusive.
        return "unknown"
def detect_terminal_background() -> str:
    """Detect whether the terminal has a light or dark background.

    Tries three strategies in order:

    1. COLORFGBG environment variable
    2. macOS appearance setting
    3. OSC 11 escape sequence query

    Returns "light", "dark", or "unknown" if detection fails.
    """
    probes = (
        _detect_via_colorfgbg,
        _detect_via_macos_appearance,
        _detect_via_osc11,
    )
    for probe in probes:
        verdict = probe()
        if verdict != "unknown":
            return verdict
    return "unknown"

View File

@@ -233,6 +233,7 @@ DEFAULT_CONFIG = {
"streaming": False,
"show_cost": False, # Show $ cost in the status bar (off by default)
"skin": "default",
"theme_mode": "auto",
},
# Privacy settings
@@ -492,6 +493,21 @@ OPTIONAL_ENV_VARS = {
"password": False,
"category": "provider",
},
"DASHSCOPE_API_KEY": {
"description": "Alibaba Cloud DashScope API key for Qwen models",
"prompt": "DashScope API Key",
"url": "https://modelstudio.console.alibabacloud.com/",
"password": True,
"category": "provider",
},
"DASHSCOPE_BASE_URL": {
"description": "Custom DashScope base URL (default: international endpoint)",
"prompt": "DashScope Base URL",
"url": "",
"password": False,
"category": "provider",
"advanced": True,
},
"OPENCODE_ZEN_API_KEY": {
"description": "OpenCode Zen API key (pay-as-you-go access to curated models)",
"prompt": "OpenCode Zen API key",

View File

@@ -146,6 +146,15 @@ _PROVIDER_MODELS: dict[str, list[str]] = {
"google/gemini-3-pro-preview",
"google/gemini-3-flash-preview",
],
"alibaba": [
"qwen3.5-plus",
"qwen3-max",
"qwen3-coder-plus",
"qwen3-coder-next",
"qwen-plus-latest",
"qwen3.5-flash",
"qwen-vl-max",
],
}
_PROVIDER_LABELS = {
@@ -162,6 +171,7 @@ _PROVIDER_LABELS = {
"opencode-go": "OpenCode Go",
"ai-gateway": "AI Gateway",
"kilocode": "Kilo Code",
"alibaba": "Alibaba Cloud (DashScope)",
"custom": "Custom endpoint",
}
@@ -187,6 +197,10 @@ _PROVIDER_ALIASES = {
"kilo": "kilocode",
"kilo-code": "kilocode",
"kilo-gateway": "kilocode",
"dashscope": "alibaba",
"aliyun": "alibaba",
"qwen": "alibaba",
"alibaba-cloud": "alibaba",
}
@@ -220,7 +234,7 @@ def list_available_providers() -> list[dict[str, str]]:
# Canonical providers in display order
_PROVIDER_ORDER = [
"openrouter", "nous", "openai-codex",
"zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic",
"zai", "kimi-coding", "minimax", "minimax-cn", "kilocode", "anthropic", "alibaba",
"opencode-zen", "opencode-go",
"ai-gateway", "deepseek", "custom",
]

View File

@@ -285,6 +285,19 @@ def resolve_runtime_provider(
"requested_provider": requested_provider,
}
# Alibaba Cloud / DashScope (Anthropic-compatible endpoint)
if provider == "alibaba":
creds = resolve_api_key_provider_credentials(provider)
base_url = creds.get("base_url", "").rstrip("/") or "https://dashscope-intl.aliyuncs.com/apps/anthropic"
return {
"provider": "alibaba",
"api_mode": "anthropic_messages",
"base_url": base_url,
"api_key": creds.get("api_key", ""),
"source": creds.get("source", "env"),
"requested_provider": requested_provider,
}
# API-key providers (z.ai/GLM, Kimi, MiniMax, MiniMax-CN)
pconfig = PROVIDER_REGISTRY.get(provider)
if pconfig and pconfig.auth_type == "api_key":

View File

@@ -114,6 +114,7 @@ class SkinConfig:
name: str
description: str = ""
colors: Dict[str, str] = field(default_factory=dict)
colors_light: Dict[str, str] = field(default_factory=dict)
spinner: Dict[str, Any] = field(default_factory=dict)
branding: Dict[str, str] = field(default_factory=dict)
tool_prefix: str = ""
@@ -122,7 +123,12 @@ class SkinConfig:
banner_hero: str = "" # Rich-markup hero art (replaces HERMES_CADUCEUS)
def get_color(self, key: str, fallback: str = "") -> str:
"""Get a color value with fallback."""
"""Get a color value with fallback.
In light theme mode, returns the light override if available.
"""
if get_theme_mode() == "light" and key in self.colors_light:
return self.colors_light[key]
return self.colors.get(key, fallback)
def get_spinner_list(self, key: str) -> List[str]:
@@ -168,6 +174,21 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#DAA520",
"session_border": "#8B8682",
},
"colors_light": {
"banner_border": "#7A5A00",
"banner_title": "#6B4C00",
"banner_accent": "#7A5500",
"banner_dim": "#8B7355",
"banner_text": "#3D2B00",
"prompt": "#3D2B00",
"ui_accent": "#7A5500",
"ui_label": "#01579B",
"ui_ok": "#1B5E20",
"input_rule": "#7A5A00",
"response_border": "#6B4C00",
"session_label": "#5C4300",
"session_border": "#8B7355",
},
"spinner": {
# Empty = use hardcoded defaults in display.py
},
@@ -201,6 +222,21 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#C7A96B",
"session_border": "#6E584B",
},
"colors_light": {
"banner_border": "#6B1010",
"banner_title": "#5C4300",
"banner_accent": "#8B1A1A",
"banner_dim": "#5C4030",
"banner_text": "#3A1800",
"prompt": "#3A1800",
"ui_accent": "#8B1A1A",
"ui_label": "#5C4300",
"ui_ok": "#1B5E20",
"input_rule": "#6B1010",
"response_border": "#7A1515",
"session_label": "#5C4300",
"session_border": "#5C4A3A",
},
"spinner": {
"waiting_faces": ["(⚔)", "(⛨)", "(▲)", "(<>)", "(/)"],
"thinking_faces": ["(⚔)", "(⛨)", "(▲)", "(⌁)", "(<>)"],
@@ -265,6 +301,22 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#888888",
"session_border": "#555555",
},
"colors_light": {
"banner_border": "#333333",
"banner_title": "#222222",
"banner_accent": "#333333",
"banner_dim": "#555555",
"banner_text": "#333333",
"prompt": "#222222",
"ui_accent": "#333333",
"ui_label": "#444444",
"ui_ok": "#444444",
"ui_error": "#333333",
"input_rule": "#333333",
"response_border": "#444444",
"session_label": "#444444",
"session_border": "#666666",
},
"spinner": {},
"branding": {
"agent_name": "Hermes Agent",
@@ -296,6 +348,21 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#7eb8f6",
"session_border": "#4b5563",
},
"colors_light": {
"banner_border": "#1A3A7A",
"banner_title": "#1A3570",
"banner_accent": "#1E4090",
"banner_dim": "#3B4555",
"banner_text": "#1A2A50",
"prompt": "#1A2A50",
"ui_accent": "#1A3570",
"ui_label": "#1E3A80",
"ui_ok": "#1B5E20",
"input_rule": "#1A3A7A",
"response_border": "#2A4FA0",
"session_label": "#1A3570",
"session_border": "#5A6070",
},
"spinner": {},
"branding": {
"agent_name": "Hermes Agent",
@@ -327,6 +394,21 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#A9DFFF",
"session_border": "#496884",
},
"colors_light": {
"banner_border": "#0D3060",
"banner_title": "#0D3060",
"banner_accent": "#154080",
"banner_dim": "#2A4565",
"banner_text": "#0A2850",
"prompt": "#0A2850",
"ui_accent": "#0D3060",
"ui_label": "#0D3060",
"ui_ok": "#1B5E20",
"input_rule": "#0D3060",
"response_border": "#1A5090",
"session_label": "#0D3060",
"session_border": "#3A5575",
},
"spinner": {
"waiting_faces": ["(≈)", "(Ψ)", "(∿)", "(◌)", "(◠)"],
"thinking_faces": ["(Ψ)", "(∿)", "(≈)", "(⌁)", "(◌)"],
@@ -391,6 +473,23 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#919191",
"session_border": "#656565",
},
"colors_light": {
"banner_border": "#666666",
"banner_title": "#222222",
"banner_accent": "#333333",
"banner_dim": "#555555",
"banner_text": "#333333",
"prompt": "#222222",
"ui_accent": "#333333",
"ui_label": "#444444",
"ui_ok": "#444444",
"ui_error": "#333333",
"ui_warn": "#444444",
"input_rule": "#666666",
"response_border": "#555555",
"session_label": "#444444",
"session_border": "#777777",
},
"spinner": {
"waiting_faces": ["(◉)", "(◌)", "(◬)", "(⬤)", "(::)"],
"thinking_faces": ["(◉)", "(◬)", "(◌)", "(○)", "(●)"],
@@ -456,6 +555,21 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
"session_label": "#FFD39A",
"session_border": "#6C4724",
},
"colors_light": {
"banner_border": "#7A3511",
"banner_title": "#5C2D00",
"banner_accent": "#8B4000",
"banner_dim": "#5A3A1A",
"banner_text": "#3A1E00",
"prompt": "#3A1E00",
"ui_accent": "#8B4000",
"ui_label": "#5C2D00",
"ui_ok": "#1B5E20",
"input_rule": "#7A3511",
"response_border": "#8B4513",
"session_label": "#5C2D00",
"session_border": "#6B5540",
},
"spinner": {
"waiting_faces": ["(✦)", "(▲)", "(◇)", "(<>)", "(🔥)"],
"thinking_faces": ["(✦)", "(▲)", "(◇)", "(⌁)", "(🔥)"],
@@ -509,6 +623,8 @@ _BUILTIN_SKINS: Dict[str, Dict[str, Any]] = {
_active_skin: Optional[SkinConfig] = None
_active_skin_name: str = "default"
_theme_mode: str = "auto"
_resolved_theme_mode: Optional[str] = None
def _skins_dir() -> Path:
@@ -536,6 +652,8 @@ def _build_skin_config(data: Dict[str, Any]) -> SkinConfig:
default = _BUILTIN_SKINS["default"]
colors = dict(default.get("colors", {}))
colors.update(data.get("colors", {}))
colors_light = dict(default.get("colors_light", {}))
colors_light.update(data.get("colors_light", {}))
spinner = dict(default.get("spinner", {}))
spinner.update(data.get("spinner", {}))
branding = dict(default.get("branding", {}))
@@ -545,6 +663,7 @@ def _build_skin_config(data: Dict[str, Any]) -> SkinConfig:
name=data.get("name", "unknown"),
description=data.get("description", ""),
colors=colors,
colors_light=colors_light,
spinner=spinner,
branding=branding,
tool_prefix=data.get("tool_prefix", default.get("tool_prefix", "")),
@@ -625,6 +744,39 @@ def get_active_skin_name() -> str:
return _active_skin_name
def get_theme_mode() -> str:
    """Return the resolved theme mode: "light" or "dark".

    When ``_theme_mode`` is ``"auto"``, detection is attempted once and cached.
    If detection returns ``"unknown"``, defaults to ``"dark"``.
    """
    global _resolved_theme_mode

    # An explicit setting wins outright — no detection needed.
    if _theme_mode in ("light", "dark"):
        return _theme_mode
    # Auto mode: run detection once and remember the answer.
    if _resolved_theme_mode is None:
        try:
            from hermes_cli.colors import detect_terminal_background
            detected = detect_terminal_background()
        except Exception:
            detected = "unknown"
        if detected not in ("light", "dark"):
            # Inconclusive detection falls back to dark.
            detected = "dark"
        _resolved_theme_mode = detected
    return _resolved_theme_mode
def set_theme_mode(mode: str) -> None:
    """Set the theme mode to "light", "dark", or "auto"."""
    global _theme_mode, _resolved_theme_mode

    _theme_mode = mode
    # Drop the cached auto-detection result so the next get_theme_mode()
    # call re-runs detection if the new mode is "auto".
    _resolved_theme_mode = None
def get_theme_mode_setting() -> str:
    """Return the raw theme mode setting (may be "auto", "light", or "dark").

    Unlike get_theme_mode(), this does not resolve "auto" via detection.
    """
    return _theme_mode
def init_skin_from_config(config: dict) -> None:
"""Initialize the active skin from CLI config at startup.
@@ -637,6 +789,13 @@ def init_skin_from_config(config: dict) -> None:
else:
set_active_skin("default")
# Theme mode
theme_mode = display.get("theme_mode", "auto")
if isinstance(theme_mode, str) and theme_mode.strip():
set_theme_mode(theme_mode.strip())
else:
set_theme_mode("auto")
# =============================================================================
# Convenience helpers for CLI modules
@@ -690,6 +849,14 @@ def get_prompt_toolkit_style_overrides() -> Dict[str, str]:
warn = skin.get_color("ui_warn", "#FF8C00")
error = skin.get_color("ui_error", "#FF6B6B")
# Use lighter background colours for completion menus in light mode
if get_theme_mode() == "light":
menu_bg = "bg:#e8e8e8"
menu_sel_bg = "bg:#d0d0d0"
else:
menu_bg = "bg:#1a1a2e"
menu_sel_bg = "bg:#333355"
return {
"input-area": prompt,
"placeholder": f"{dim} italic",
@@ -698,11 +865,11 @@ def get_prompt_toolkit_style_overrides() -> Dict[str, str]:
"hint": f"{dim} italic",
"input-rule": input_rule,
"image-badge": f"{label} bold",
"completion-menu": f"bg:#1a1a2e {text}",
"completion-menu.completion": f"bg:#1a1a2e {text}",
"completion-menu.completion.current": f"bg:#333355 {title}",
"completion-menu.meta.completion": f"bg:#1a1a2e {dim}",
"completion-menu.meta.completion.current": f"bg:#333355 {label}",
"completion-menu": f"{menu_bg} {text}",
"completion-menu.completion": f"{menu_bg} {text}",
"completion-menu.completion.current": f"{menu_sel_bg} {title}",
"completion-menu.meta.completion": f"{menu_bg} {dim}",
"completion-menu.meta.completion.current": f"{menu_sel_bg} {label}",
"clarify-border": input_rule,
"clarify-title": f"{title} bold",
"clarify-question": f"{text} bold",

View File

@@ -18,6 +18,7 @@ import json
import os
import re
import sqlite3
import threading
import time
from pathlib import Path
from typing import Dict, Any, List, Optional
@@ -104,6 +105,7 @@ class SessionDB:
self.db_path = db_path or DEFAULT_DB_PATH
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self._lock = threading.Lock()
self._conn = sqlite3.connect(
str(self.db_path),
check_same_thread=False,
@@ -173,9 +175,10 @@ class SessionDB:
def close(self):
"""Close the database connection."""
if self._conn:
self._conn.close()
self._conn = None
with self._lock:
if self._conn:
self._conn.close()
self._conn = None
# =========================================================================
# Session lifecycle
@@ -192,61 +195,66 @@ class SessionDB:
parent_session_id: str = None,
) -> str:
"""Create a new session record. Returns the session_id."""
self._conn.execute(
"""INSERT INTO sessions (id, source, user_id, model, model_config,
system_prompt, parent_session_id, started_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
(
session_id,
source,
user_id,
model,
json.dumps(model_config) if model_config else None,
system_prompt,
parent_session_id,
time.time(),
),
)
self._conn.commit()
with self._lock:
self._conn.execute(
"""INSERT INTO sessions (id, source, user_id, model, model_config,
system_prompt, parent_session_id, started_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
(
session_id,
source,
user_id,
model,
json.dumps(model_config) if model_config else None,
system_prompt,
parent_session_id,
time.time(),
),
)
self._conn.commit()
return session_id
def end_session(self, session_id: str, end_reason: str) -> None:
"""Mark a session as ended."""
self._conn.execute(
"UPDATE sessions SET ended_at = ?, end_reason = ? WHERE id = ?",
(time.time(), end_reason, session_id),
)
self._conn.commit()
with self._lock:
self._conn.execute(
"UPDATE sessions SET ended_at = ?, end_reason = ? WHERE id = ?",
(time.time(), end_reason, session_id),
)
self._conn.commit()
def update_system_prompt(self, session_id: str, system_prompt: str) -> None:
"""Store the full assembled system prompt snapshot."""
self._conn.execute(
"UPDATE sessions SET system_prompt = ? WHERE id = ?",
(system_prompt, session_id),
)
self._conn.commit()
with self._lock:
self._conn.execute(
"UPDATE sessions SET system_prompt = ? WHERE id = ?",
(system_prompt, session_id),
)
self._conn.commit()
def update_token_counts(
self, session_id: str, input_tokens: int = 0, output_tokens: int = 0,
model: str = None,
) -> None:
"""Increment token counters and backfill model if not already set."""
self._conn.execute(
"""UPDATE sessions SET
input_tokens = input_tokens + ?,
output_tokens = output_tokens + ?,
model = COALESCE(model, ?)
WHERE id = ?""",
(input_tokens, output_tokens, model, session_id),
)
self._conn.commit()
with self._lock:
self._conn.execute(
"""UPDATE sessions SET
input_tokens = input_tokens + ?,
output_tokens = output_tokens + ?,
model = COALESCE(model, ?)
WHERE id = ?""",
(input_tokens, output_tokens, model, session_id),
)
self._conn.commit()
def get_session(self, session_id: str) -> Optional[Dict[str, Any]]:
"""Get a session by ID."""
cursor = self._conn.execute(
"SELECT * FROM sessions WHERE id = ?", (session_id,)
)
row = cursor.fetchone()
with self._lock:
cursor = self._conn.execute(
"SELECT * FROM sessions WHERE id = ?", (session_id,)
)
row = cursor.fetchone()
return dict(row) if row else None
def resolve_session_id(self, session_id_or_prefix: str) -> Optional[str]:
@@ -331,38 +339,42 @@ class SessionDB:
Empty/whitespace-only strings are normalized to None (clearing the title).
"""
title = self.sanitize_title(title)
if title:
# Check uniqueness (allow the same session to keep its own title)
with self._lock:
if title:
# Check uniqueness (allow the same session to keep its own title)
cursor = self._conn.execute(
"SELECT id FROM sessions WHERE title = ? AND id != ?",
(title, session_id),
)
conflict = cursor.fetchone()
if conflict:
raise ValueError(
f"Title '{title}' is already in use by session {conflict['id']}"
)
cursor = self._conn.execute(
"SELECT id FROM sessions WHERE title = ? AND id != ?",
"UPDATE sessions SET title = ? WHERE id = ?",
(title, session_id),
)
conflict = cursor.fetchone()
if conflict:
raise ValueError(
f"Title '{title}' is already in use by session {conflict['id']}"
)
cursor = self._conn.execute(
"UPDATE sessions SET title = ? WHERE id = ?",
(title, session_id),
)
self._conn.commit()
return cursor.rowcount > 0
self._conn.commit()
rowcount = cursor.rowcount
return rowcount > 0
def get_session_title(self, session_id: str) -> Optional[str]:
"""Get the title for a session, or None."""
cursor = self._conn.execute(
"SELECT title FROM sessions WHERE id = ?", (session_id,)
)
row = cursor.fetchone()
with self._lock:
cursor = self._conn.execute(
"SELECT title FROM sessions WHERE id = ?", (session_id,)
)
row = cursor.fetchone()
return row["title"] if row else None
def get_session_by_title(self, title: str) -> Optional[Dict[str, Any]]:
"""Look up a session by exact title. Returns session dict or None."""
cursor = self._conn.execute(
"SELECT * FROM sessions WHERE title = ?", (title,)
)
row = cursor.fetchone()
with self._lock:
cursor = self._conn.execute(
"SELECT * FROM sessions WHERE title = ?", (title,)
)
row = cursor.fetchone()
return dict(row) if row else None
def resolve_session_by_title(self, title: str) -> Optional[str]:
@@ -379,12 +391,13 @@ class SessionDB:
# Also search for numbered variants: "title #2", "title #3", etc.
# Escape SQL LIKE wildcards (%, _) in the title to prevent false matches
escaped = title.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
cursor = self._conn.execute(
"SELECT id, title, started_at FROM sessions "
"WHERE title LIKE ? ESCAPE '\\' ORDER BY started_at DESC",
(f"{escaped} #%",),
)
numbered = cursor.fetchall()
with self._lock:
cursor = self._conn.execute(
"SELECT id, title, started_at FROM sessions "
"WHERE title LIKE ? ESCAPE '\\' ORDER BY started_at DESC",
(f"{escaped} #%",),
)
numbered = cursor.fetchall()
if numbered:
# Return the most recent numbered variant
@@ -409,11 +422,12 @@ class SessionDB:
# Find all existing numbered variants
# Escape SQL LIKE wildcards (%, _) in the base to prevent false matches
escaped = base.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
cursor = self._conn.execute(
"SELECT title FROM sessions WHERE title = ? OR title LIKE ? ESCAPE '\\'",
(base, f"{escaped} #%"),
)
existing = [row["title"] for row in cursor.fetchall()]
with self._lock:
cursor = self._conn.execute(
"SELECT title FROM sessions WHERE title = ? OR title LIKE ? ESCAPE '\\'",
(base, f"{escaped} #%"),
)
existing = [row["title"] for row in cursor.fetchall()]
if not existing:
return base # No conflict, use the base name as-is
@@ -461,9 +475,11 @@ class SessionDB:
LIMIT ? OFFSET ?
"""
params = (source, limit, offset) if source else (limit, offset)
cursor = self._conn.execute(query, params)
with self._lock:
cursor = self._conn.execute(query, params)
rows = cursor.fetchall()
sessions = []
for row in cursor.fetchall():
for row in rows:
s = dict(row)
# Build the preview from the raw substring
raw = s.pop("_preview_raw", "").strip()
@@ -497,52 +513,54 @@ class SessionDB:
Also increments the session's message_count (and tool_call_count
if role is 'tool' or tool_calls is present).
"""
cursor = self._conn.execute(
"""INSERT INTO messages (session_id, role, content, tool_call_id,
tool_calls, tool_name, timestamp, token_count, finish_reason)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
session_id,
role,
content,
tool_call_id,
json.dumps(tool_calls) if tool_calls else None,
tool_name,
time.time(),
token_count,
finish_reason,
),
)
msg_id = cursor.lastrowid
# Update counters
# Count actual tool calls from the tool_calls list (not from tool responses).
# A single assistant message can contain multiple parallel tool calls.
num_tool_calls = 0
if tool_calls is not None:
num_tool_calls = len(tool_calls) if isinstance(tool_calls, list) else 1
if num_tool_calls > 0:
self._conn.execute(
"""UPDATE sessions SET message_count = message_count + 1,
tool_call_count = tool_call_count + ? WHERE id = ?""",
(num_tool_calls, session_id),
)
else:
self._conn.execute(
"UPDATE sessions SET message_count = message_count + 1 WHERE id = ?",
(session_id,),
with self._lock:
cursor = self._conn.execute(
"""INSERT INTO messages (session_id, role, content, tool_call_id,
tool_calls, tool_name, timestamp, token_count, finish_reason)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(
session_id,
role,
content,
tool_call_id,
json.dumps(tool_calls) if tool_calls else None,
tool_name,
time.time(),
token_count,
finish_reason,
),
)
msg_id = cursor.lastrowid
self._conn.commit()
# Update counters
# Count actual tool calls from the tool_calls list (not from tool responses).
# A single assistant message can contain multiple parallel tool calls.
num_tool_calls = 0
if tool_calls is not None:
num_tool_calls = len(tool_calls) if isinstance(tool_calls, list) else 1
if num_tool_calls > 0:
self._conn.execute(
"""UPDATE sessions SET message_count = message_count + 1,
tool_call_count = tool_call_count + ? WHERE id = ?""",
(num_tool_calls, session_id),
)
else:
self._conn.execute(
"UPDATE sessions SET message_count = message_count + 1 WHERE id = ?",
(session_id,),
)
self._conn.commit()
return msg_id
def get_messages(self, session_id: str) -> List[Dict[str, Any]]:
"""Load all messages for a session, ordered by timestamp."""
cursor = self._conn.execute(
"SELECT * FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(session_id,),
)
rows = cursor.fetchall()
with self._lock:
cursor = self._conn.execute(
"SELECT * FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(session_id,),
)
rows = cursor.fetchall()
result = []
for row in rows:
msg = dict(row)
@@ -559,13 +577,15 @@ class SessionDB:
Load messages in the OpenAI conversation format (role + content dicts).
Used by the gateway to restore conversation history.
"""
cursor = self._conn.execute(
"SELECT role, content, tool_call_id, tool_calls, tool_name "
"FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(session_id,),
)
with self._lock:
cursor = self._conn.execute(
"SELECT role, content, tool_call_id, tool_calls, tool_name "
"FROM messages WHERE session_id = ? ORDER BY timestamp, id",
(session_id,),
)
rows = cursor.fetchall()
messages = []
for row in cursor.fetchall():
for row in rows:
msg = {"role": row["role"], "content": row["content"]}
if row["tool_call_id"]:
msg["tool_call_id"] = row["tool_call_id"]
@@ -675,31 +695,33 @@ class SessionDB:
LIMIT ? OFFSET ?
"""
try:
cursor = self._conn.execute(sql, params)
except sqlite3.OperationalError:
# FTS5 query syntax error despite sanitization — return empty
return []
matches = [dict(row) for row in cursor.fetchall()]
# Add surrounding context (1 message before + after each match)
for match in matches:
with self._lock:
try:
ctx_cursor = self._conn.execute(
"""SELECT role, content FROM messages
WHERE session_id = ? AND id >= ? - 1 AND id <= ? + 1
ORDER BY id""",
(match["session_id"], match["id"], match["id"]),
)
context_msgs = [
{"role": r["role"], "content": (r["content"] or "")[:200]}
for r in ctx_cursor.fetchall()
]
match["context"] = context_msgs
except Exception:
match["context"] = []
cursor = self._conn.execute(sql, params)
except sqlite3.OperationalError:
# FTS5 query syntax error despite sanitization — return empty
return []
matches = [dict(row) for row in cursor.fetchall()]
# Remove full content from result (snippet is enough, saves tokens)
# Add surrounding context (1 message before + after each match)
for match in matches:
try:
ctx_cursor = self._conn.execute(
"""SELECT role, content FROM messages
WHERE session_id = ? AND id >= ? - 1 AND id <= ? + 1
ORDER BY id""",
(match["session_id"], match["id"], match["id"]),
)
context_msgs = [
{"role": r["role"], "content": (r["content"] or "")[:200]}
for r in ctx_cursor.fetchall()
]
match["context"] = context_msgs
except Exception:
match["context"] = []
# Remove full content from result (snippet is enough, saves tokens)
for match in matches:
match.pop("content", None)
return matches

View File

@@ -69,6 +69,8 @@ class HonchoClientConfig:
workspace_id: str = "hermes"
api_key: str | None = None
environment: str = "production"
# Optional base URL for self-hosted Honcho (overrides environment mapping)
base_url: str | None = None
# Identity
peer_name: str | None = None
ai_peer: str = "hermes"
@@ -361,13 +363,34 @@ def get_honcho_client(config: HonchoClientConfig | None = None) -> Honcho:
"Install it with: pip install honcho-ai"
)
logger.info("Initializing Honcho client (host: %s, workspace: %s)", config.host, config.workspace_id)
# Allow config.yaml honcho.base_url to override the SDK's environment
# mapping, enabling remote self-hosted Honcho deployments without
# requiring the server to live on localhost.
resolved_base_url = config.base_url
if not resolved_base_url:
try:
from hermes_cli.config import load_config
hermes_cfg = load_config()
honcho_cfg = hermes_cfg.get("honcho", {})
if isinstance(honcho_cfg, dict):
resolved_base_url = honcho_cfg.get("base_url", "").strip() or None
except Exception:
pass
_honcho_client = Honcho(
workspace_id=config.workspace_id,
api_key=config.api_key,
environment=config.environment,
)
if resolved_base_url:
logger.info("Initializing Honcho client (base_url: %s, workspace: %s)", resolved_base_url, config.workspace_id)
else:
logger.info("Initializing Honcho client (host: %s, workspace: %s)", config.host, config.workspace_id)
kwargs: dict = {
"workspace_id": config.workspace_id,
"api_key": config.api_key,
"environment": config.environment,
}
if resolved_base_url:
kwargs["base_url"] = resolved_base_url
_honcho_client = Honcho(**kwargs)
return _honcho_client

View File

@@ -0,0 +1,192 @@
---
name: sherlock
description: OSINT username search across 400+ social networks. Hunt down social media accounts by username.
version: 1.0.0
author: unmodeled-tyler
license: MIT
metadata:
hermes:
tags: [osint, security, username, social-media, reconnaissance]
category: security
prerequisites:
commands: [sherlock]
---
# Sherlock OSINT Username Search
Hunt down social media accounts by username across 400+ social networks using the [Sherlock Project](https://github.com/sherlock-project/sherlock).
## When to Use
- User asks to find accounts associated with a username
- User wants to check username availability across platforms
- User is conducting OSINT or reconnaissance research
- User asks "where is this username registered?" or similar
## Requirements
- Sherlock CLI installed: `pipx install sherlock-project` or `pip install sherlock-project`
- Alternatively: Docker available (`docker run -it --rm sherlock/sherlock`)
- Network access to query social platforms
## Procedure
### 1. Check if Sherlock is Installed
**Before doing anything else**, verify sherlock is available:
```bash
sherlock --version
```
If the command fails:
- Offer to install: `pipx install sherlock-project` (recommended) or `pip install sherlock-project`
- **Do NOT** try multiple installation methods — pick one and proceed
- If installation fails, inform the user and stop
### 2. Extract Username
**Extract the username directly from the user's message if clearly stated.**
Examples where you should **NOT** use clarify:
- "Find accounts for nasa" → username is `nasa`
- "Search for johndoe123" → username is `johndoe123`
- "Check if alice exists on social media" → username is `alice`
- "Look up user bob on social networks" → username is `bob`
**Only use clarify if:**
- Multiple potential usernames mentioned ("search for alice or bob")
- Ambiguous phrasing ("search for my username" without specifying)
- No username mentioned at all ("do an OSINT search")
When extracting, take the **exact** username as stated — preserve case, numbers, underscores, etc.
### 3. Build Command
**Default command** (use this unless user specifically requests otherwise):
```bash
sherlock --print-found --no-color "<username>" --timeout 90
```
**Optional flags** (only add if user explicitly requests):
- `--nsfw` — Include NSFW sites (only if user asks)
- `--tor` — Route through Tor (only if user asks for anonymity)
**Do NOT ask about options via clarify** — just run the default search. Users can request specific options if needed.
### 4. Execute Search
Run via the `terminal` tool. The command typically takes 30-120 seconds depending on network conditions and site count.
**Example terminal call:**
```json
{
"command": "sherlock --print-found --no-color \"target_username\"",
"timeout": 180
}
```
### 5. Parse and Present Results
Sherlock outputs found accounts in a simple format. Parse the output and present:
1. **Summary line:** "Found X accounts for username 'Y'"
2. **Categorized links:** Group by platform type if helpful (social, professional, forums, etc.)
3. **Output file location:** Sherlock saves results to `<username>.txt` by default
**Example output parsing:**
```
[+] Instagram: https://instagram.com/username
[+] Twitter: https://twitter.com/username
[+] GitHub: https://github.com/username
```
Present findings as clickable links when possible.
## Pitfalls
### No Results Found
If Sherlock finds no accounts, this is often correct — the username may not be registered on checked platforms. Suggest:
- Checking spelling/variation
- Trying similar usernames with the `{?}` wildcard: `sherlock "user{?}name"` (Sherlock substitutes `_`, `-`, and `.` in its place)
- The user may have privacy settings or deleted accounts
### Timeout Issues
Some sites are slow or block automated requests. Use `--timeout 120` to increase wait time, or `--site` to limit scope.
### Tor Configuration
`--tor` requires Tor daemon running. If user wants anonymity but Tor isn't available, suggest:
- Installing Tor service
- Using `--proxy` with an alternative proxy
### False Positives
Some sites always return "found" due to their response structure. Cross-reference unexpected results with manual checks.
### Rate Limiting
Aggressive searches may trigger rate limits. For bulk username searches, add delays between calls or use `--local` with cached data.
## Installation
### pipx (recommended)
```bash
pipx install sherlock-project
```
### pip
```bash
pip install sherlock-project
```
### Docker
```bash
docker pull sherlock/sherlock
docker run -it --rm sherlock/sherlock <username>
```
### Linux packages
Available on Debian 13+, Ubuntu 22.10+, Homebrew, Kali, BlackArch.
## Ethical Use
This tool is for legitimate OSINT and research purposes only. Remind users:
- Only search usernames they own or have permission to investigate
- Respect platform terms of service
- Do not use for harassment, stalking, or illegal activities
- Consider privacy implications before sharing results
## Verification
After running sherlock, verify:
1. Output lists found sites with URLs
2. `<username>.txt` file created — Sherlock saves found results to this file by default
3. If `--print-found` used, output should only contain `[+]` lines for matches
## Example Interaction
**User:** "Can you check if the username 'johndoe123' exists on social media?"
**Agent procedure:**
1. Check `sherlock --version` (verify installed)
2. Username provided — proceed directly
3. Run: `sherlock --print-found --no-color "johndoe123" --timeout 90`
4. Parse output and present links
**Response format:**
> Found 12 accounts for username 'johndoe123':
>
> • https://twitter.com/johndoe123
> • https://github.com/johndoe123
> • https://instagram.com/johndoe123
> • [... additional links]
>
> Results saved to: johndoe123.txt
---
**User:** "Search for username 'alice' including NSFW sites"
**Agent procedure:**
1. Check sherlock installed
2. Username + NSFW flag both provided
3. Run: `sherlock --print-found --no-color --nsfw "alice" --timeout 90`
4. Present results

View File

@@ -407,6 +407,7 @@ class AIAgent:
# Subagent delegation state
self._delegate_depth = 0 # 0 = top-level agent, incremented for children
self._active_children = [] # Running child AIAgents (for interrupt propagation)
self._active_children_lock = threading.Lock()
# Store OpenRouter provider preferences
self.providers_allowed = providers_allowed
@@ -1526,7 +1527,9 @@ class AIAgent:
# Signal all tools to abort any in-flight operations immediately
_set_interrupt(True)
# Propagate interrupt to any running child agents (subagent delegation)
for child in self._active_children:
with self._active_children_lock:
children_copy = list(self._active_children)
for child in children_copy:
try:
child.interrupt(message)
except Exception as e:

View File

@@ -0,0 +1,121 @@
"""Tests for Telegram text message aggregation.
When a user sends a long message, Telegram clients split it into multiple
updates. The TelegramAdapter should buffer rapid successive text messages
from the same session and aggregate them before dispatching.
"""
import asyncio
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from gateway.config import Platform, PlatformConfig
from gateway.platforms.base import MessageEvent, MessageType, SessionSource
def _make_adapter():
    """Build a bare TelegramAdapter wired up just enough to exercise text batching."""
    from gateway.platforms.telegram import TelegramAdapter

    # Bypass __init__ so no real Telegram client / network setup happens.
    instance = object.__new__(TelegramAdapter)
    instance._platform = Platform.TELEGRAM
    instance.config = PlatformConfig(enabled=True, token="test-token")
    # Batching state starts empty; a short window keeps the tests quick.
    instance._pending_text_batches = {}
    instance._pending_text_batch_tasks = {}
    instance._text_batch_delay_seconds = 0.1  # fast for tests
    instance._active_sessions = {}
    instance._pending_messages = {}
    # Stub downstream dispatch so tests can assert on calls.
    instance._message_handler = AsyncMock()
    instance.handle_message = AsyncMock()
    return instance
def _make_event(text: str, chat_id: str = "12345") -> MessageEvent:
    """Construct a plain DM text MessageEvent for the given chat id."""
    source = SessionSource(platform=Platform.TELEGRAM, chat_id=chat_id, chat_type="dm")
    return MessageEvent(text=text, message_type=MessageType.TEXT, source=source)
class TestTextBatching:
    """Behavioral tests for TelegramAdapter's text-message aggregation window.

    Each test enqueues events via _enqueue_text_event and waits past the
    0.1s batch delay (set in _make_adapter) before asserting on the
    AsyncMock handle_message stub.
    """

    @pytest.mark.asyncio
    async def test_single_message_dispatched_after_delay(self):
        """A lone message is held for the batch window, then dispatched unchanged."""
        adapter = _make_adapter()
        event = _make_event("hello world")
        adapter._enqueue_text_event(event)
        # Not dispatched yet
        adapter.handle_message.assert_not_called()
        # Wait for flush
        await asyncio.sleep(0.2)
        adapter.handle_message.assert_called_once()
        dispatched = adapter.handle_message.call_args[0][0]
        assert dispatched.text == "hello world"

    @pytest.mark.asyncio
    async def test_split_messages_aggregated(self):
        """Two rapid messages from the same chat should be merged."""
        adapter = _make_adapter()
        adapter._enqueue_text_event(_make_event("This is part one of a long"))
        await asyncio.sleep(0.02)  # small gap, within batch window
        adapter._enqueue_text_event(_make_event("message that was split by Telegram."))
        # Not dispatched yet (timer restarted)
        adapter.handle_message.assert_not_called()
        # Wait for flush
        await asyncio.sleep(0.2)
        adapter.handle_message.assert_called_once()
        dispatched = adapter.handle_message.call_args[0][0]
        assert "part one" in dispatched.text
        assert "split by Telegram" in dispatched.text

    @pytest.mark.asyncio
    async def test_three_way_split_aggregated(self):
        """Three rapid messages should all merge."""
        adapter = _make_adapter()
        adapter._enqueue_text_event(_make_event("chunk 1"))
        await asyncio.sleep(0.02)
        adapter._enqueue_text_event(_make_event("chunk 2"))
        await asyncio.sleep(0.02)
        adapter._enqueue_text_event(_make_event("chunk 3"))
        await asyncio.sleep(0.2)
        adapter.handle_message.assert_called_once()
        text = adapter.handle_message.call_args[0][0].text
        assert "chunk 1" in text
        assert "chunk 2" in text
        assert "chunk 3" in text

    @pytest.mark.asyncio
    async def test_different_chats_not_merged(self):
        """Messages from different chats should be separate batches."""
        adapter = _make_adapter()
        adapter._enqueue_text_event(_make_event("from user A", chat_id="111"))
        adapter._enqueue_text_event(_make_event("from user B", chat_id="222"))
        await asyncio.sleep(0.2)
        assert adapter.handle_message.call_count == 2

    @pytest.mark.asyncio
    async def test_batch_cleans_up_after_flush(self):
        """After flushing, internal state should be clean."""
        adapter = _make_adapter()
        adapter._enqueue_text_event(_make_event("test"))
        await asyncio.sleep(0.2)
        assert len(adapter._pending_text_batches) == 0
        assert len(adapter._pending_text_batch_tasks) == 0

View File

@@ -13,9 +13,13 @@ def reset_skin_state():
from hermes_cli import skin_engine
skin_engine._active_skin = None
skin_engine._active_skin_name = "default"
skin_engine._theme_mode = "auto"
skin_engine._resolved_theme_mode = None
yield
skin_engine._active_skin = None
skin_engine._active_skin_name = "default"
skin_engine._theme_mode = "auto"
skin_engine._resolved_theme_mode = None
class TestSkinConfig:
@@ -312,3 +316,65 @@ class TestCliBrandingHelpers:
assert overrides["clarify-title"] == f"{skin.get_color('banner_title')} bold"
assert overrides["sudo-prompt"] == f"{skin.get_color('ui_error')} bold"
assert overrides["approval-title"] == f"{skin.get_color('ui_warn')} bold"
class TestThemeMode:
def test_get_theme_mode_defaults_to_dark_on_unknown(self):
from hermes_cli.skin_engine import get_theme_mode, set_theme_mode
set_theme_mode("auto")
# In a test env, detection returns "unknown" → defaults to "dark"
with patch("hermes_cli.colors.detect_terminal_background", return_value="unknown"):
from hermes_cli import skin_engine
skin_engine._resolved_theme_mode = None # force re-detection
assert get_theme_mode() == "dark"
def test_set_theme_mode_light(self):
from hermes_cli.skin_engine import get_theme_mode, set_theme_mode
set_theme_mode("light")
assert get_theme_mode() == "light"
def test_set_theme_mode_dark(self):
from hermes_cli.skin_engine import get_theme_mode, set_theme_mode
set_theme_mode("dark")
assert get_theme_mode() == "dark"
def test_get_color_respects_light_mode(self):
from hermes_cli.skin_engine import SkinConfig, set_theme_mode
skin = SkinConfig(
name="test",
colors={"banner_title": "#FFD700", "prompt": "#FFF8DC"},
colors_light={"banner_title": "#6B4C00"},
)
set_theme_mode("light")
assert skin.get_color("banner_title") == "#6B4C00"
# Key not in colors_light falls back to colors
assert skin.get_color("prompt") == "#FFF8DC"
def test_get_color_falls_back_in_dark_mode(self):
from hermes_cli.skin_engine import SkinConfig, set_theme_mode
skin = SkinConfig(
name="test",
colors={"banner_title": "#FFD700", "prompt": "#FFF8DC"},
colors_light={"banner_title": "#6B4C00"},
)
set_theme_mode("dark")
assert skin.get_color("banner_title") == "#FFD700"
assert skin.get_color("prompt") == "#FFF8DC"
def test_init_skin_from_config_reads_theme_mode(self):
from hermes_cli.skin_engine import init_skin_from_config, get_theme_mode_setting
init_skin_from_config({"display": {"skin": "default", "theme_mode": "light"}})
assert get_theme_mode_setting() == "light"
def test_builtin_skins_have_colors_light(self):
from hermes_cli.skin_engine import _BUILTIN_SKINS, _build_skin_config
for name, data in _BUILTIN_SKINS.items():
skin = _build_skin_config(data)
assert len(skin.colors_light) > 0, f"Skin '{name}' has empty colors_light"

View File

@@ -24,6 +24,7 @@ def main() -> int:
parent._interrupt_requested = False
parent._interrupt_message = None
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent.quiet_mode = True
parent.model = "test/model"
parent.base_url = "http://localhost:1"

View File

@@ -43,6 +43,7 @@ class TestCLISubagentInterrupt(unittest.TestCase):
parent._interrupt_requested = False
parent._interrupt_message = None
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent.quiet_mode = True
parent.model = "test/model"
parent.base_url = "http://localhost:1"
@@ -112,21 +113,21 @@ class TestCLISubagentInterrupt(unittest.TestCase):
mock_instance._interrupt_requested = False
mock_instance._interrupt_message = None
mock_instance._active_children = []
mock_instance._active_children_lock = threading.Lock()
mock_instance.quiet_mode = True
mock_instance.run_conversation = mock_child_run_conversation
mock_instance.interrupt = lambda msg=None: setattr(mock_instance, '_interrupt_requested', True) or setattr(mock_instance, '_interrupt_message', msg)
mock_instance.tools = []
MockAgent.return_value = mock_instance
# Register child manually (normally done by _build_child_agent)
parent._active_children.append(mock_instance)
result = _run_single_child(
task_index=0,
goal="Do something slow",
context=None,
toolsets=["terminal"],
model=None,
max_iterations=50,
child=mock_instance,
parent_agent=parent,
task_count=1,
)
delegate_result[0] = result
except Exception as e:

View File

@@ -57,6 +57,7 @@ def main() -> int:
parent._interrupt_requested = False
parent._interrupt_message = None
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent.quiet_mode = True
parent.model = "test/model"
parent.base_url = "http://localhost:1"

View File

@@ -30,12 +30,14 @@ class TestInterruptPropagationToChild(unittest.TestCase):
parent._interrupt_requested = False
parent._interrupt_message = None
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent.quiet_mode = True
child = AIAgent.__new__(AIAgent)
child._interrupt_requested = False
child._interrupt_message = None
child._active_children = []
child._active_children_lock = threading.Lock()
child.quiet_mode = True
parent._active_children.append(child)
@@ -60,6 +62,7 @@ class TestInterruptPropagationToChild(unittest.TestCase):
child._interrupt_message = "msg"
child.quiet_mode = True
child._active_children = []
child._active_children_lock = threading.Lock()
# Global is set
set_interrupt(True)
@@ -78,6 +81,7 @@ class TestInterruptPropagationToChild(unittest.TestCase):
child._interrupt_requested = False
child._interrupt_message = None
child._active_children = []
child._active_children_lock = threading.Lock()
child.quiet_mode = True
child.api_mode = "chat_completions"
child.log_prefix = ""
@@ -119,12 +123,14 @@ class TestInterruptPropagationToChild(unittest.TestCase):
parent._interrupt_requested = False
parent._interrupt_message = None
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent.quiet_mode = True
child = AIAgent.__new__(AIAgent)
child._interrupt_requested = False
child._interrupt_message = None
child._active_children = []
child._active_children_lock = threading.Lock()
child.quiet_mode = True
# Register child (simulating what _run_single_child does)

View File

@@ -47,6 +47,28 @@ class TestCLIQuickCommands:
args = cli.console.print.call_args[0][0]
assert "no output" in args.lower()
def test_alias_command_routes_to_target(self):
"""Alias quick commands rewrite to the target command."""
cli = self._make_cli({"shortcut": {"type": "alias", "target": "/help"}})
with patch.object(cli, "process_command", wraps=cli.process_command) as spy:
cli.process_command("/shortcut")
# Should recursively call process_command with /help
spy.assert_any_call("/help")
def test_alias_command_passes_args(self):
"""Alias quick commands forward user arguments to the target."""
cli = self._make_cli({"sc": {"type": "alias", "target": "/context"}})
with patch.object(cli, "process_command", wraps=cli.process_command) as spy:
cli.process_command("/sc some args")
spy.assert_any_call("/context some args")
def test_alias_no_target_shows_error(self):
cli = self._make_cli({"broken": {"type": "alias", "target": ""}})
cli.process_command("/broken")
cli.console.print.assert_called_once()
args = cli.console.print.call_args[0][0]
assert "no target defined" in args.lower()
def test_unsupported_type_shows_error(self):
cli = self._make_cli({"bad": {"type": "prompt", "command": "echo hi"}})
cli.process_command("/bad")

View File

@@ -55,6 +55,7 @@ class TestRealSubagentInterrupt(unittest.TestCase):
parent._interrupt_requested = False
parent._interrupt_message = None
parent._active_children = []
parent._active_children_lock = threading.Lock()
parent.quiet_mode = True
parent.model = "test/model"
parent.base_url = "http://localhost:1"
@@ -103,19 +104,28 @@ class TestRealSubagentInterrupt(unittest.TestCase):
return original_run(self_agent, *args, **kwargs)
with patch.object(AIAgent, 'run_conversation', patched_run):
# Build a real child agent (AIAgent is NOT patched here,
# only run_conversation and _build_system_prompt are)
child = AIAgent(
base_url="http://localhost:1",
api_key="test-key",
model="test/model",
provider="test",
api_mode="chat_completions",
max_iterations=5,
enabled_toolsets=["terminal"],
quiet_mode=True,
skip_context_files=True,
skip_memory=True,
platform="cli",
)
child._delegate_depth = 1
parent._active_children.append(child)
result = _run_single_child(
task_index=0,
goal="Test task",
context=None,
toolsets=["terminal"],
model="test/model",
max_iterations=5,
child=child,
parent_agent=parent,
task_count=1,
override_provider="test",
override_base_url="http://localhost:1",
override_api_key="test",
override_api_mode="chat_completions",
)
result_holder[0] = result
except Exception as e:

View File

@@ -12,6 +12,7 @@ Run with: python -m pytest tests/test_delegate.py -v
import json
import os
import sys
import threading
import unittest
from unittest.mock import MagicMock, patch
@@ -44,6 +45,7 @@ def _make_mock_parent(depth=0):
parent._session_db = None
parent._delegate_depth = depth
parent._active_children = []
parent._active_children_lock = threading.Lock()
return parent
@@ -722,7 +724,12 @@ class TestDelegationProviderIntegration(unittest.TestCase):
}
parent = _make_mock_parent(depth=0)
with patch("tools.delegate_tool._run_single_child") as mock_run:
# Patch _build_child_agent since credentials are now passed there
# (agents are built in the main thread before being handed to workers)
with patch("tools.delegate_tool._build_child_agent") as mock_build, \
patch("tools.delegate_tool._run_single_child") as mock_run:
mock_child = MagicMock()
mock_build.return_value = mock_child
mock_run.return_value = {
"task_index": 0, "status": "completed",
"summary": "Done", "api_calls": 1, "duration_seconds": 1.0
@@ -731,7 +738,8 @@ class TestDelegationProviderIntegration(unittest.TestCase):
tasks = [{"goal": "Task A"}, {"goal": "Task B"}]
delegate_task(tasks=tasks, parent_agent=parent)
for call in mock_run.call_args_list:
self.assertEqual(mock_build.call_count, 2)
for call in mock_build.call_args_list:
self.assertEqual(call.kwargs.get("model"), "meta-llama/llama-4-scout")
self.assertEqual(call.kwargs.get("override_provider"), "openrouter")
self.assertEqual(call.kwargs.get("override_base_url"), "https://openrouter.ai/api/v1")

View File

@@ -16,13 +16,10 @@ The parent's context only sees the delegation call and the summary result,
never the child's intermediate tool calls or reasoning.
"""
import contextlib
import io
import json
import logging
logger = logging.getLogger(__name__)
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Dict, List, Optional
@@ -150,7 +147,7 @@ def _build_child_progress_callback(task_index: int, parent_agent, task_count: in
return _callback
def _run_single_child(
def _build_child_agent(
task_index: int,
goal: str,
context: Optional[str],
@@ -158,16 +155,15 @@ def _run_single_child(
model: Optional[str],
max_iterations: int,
parent_agent,
task_count: int = 1,
# Credential overrides from delegation config (provider:model resolution)
override_provider: Optional[str] = None,
override_base_url: Optional[str] = None,
override_api_key: Optional[str] = None,
override_api_mode: Optional[str] = None,
) -> Dict[str, Any]:
):
"""
Spawn and run a single child agent. Called from within a thread.
Returns a structured result dict.
Build a child AIAgent on the main thread (thread-safe construction).
Returns the constructed child agent without running it.
When override_* params are set (from delegation config), the child uses
those credentials instead of inheriting from the parent. This enables
@@ -176,8 +172,6 @@ def _run_single_child(
"""
from run_agent import AIAgent
child_start = time.monotonic()
# When no explicit toolsets given, inherit from parent's enabled toolsets
# so disabled tools (e.g. web) don't leak to subagents.
if toolsets:
@@ -188,65 +182,84 @@ def _run_single_child(
child_toolsets = _strip_blocked_tools(DEFAULT_TOOLSETS)
child_prompt = _build_child_system_prompt(goal, context)
# Extract parent's API key so subagents inherit auth (e.g. Nous Portal).
parent_api_key = getattr(parent_agent, "api_key", None)
if (not parent_api_key) and hasattr(parent_agent, "_client_kwargs"):
parent_api_key = parent_agent._client_kwargs.get("api_key")
try:
# Extract parent's API key so subagents inherit auth (e.g. Nous Portal).
parent_api_key = getattr(parent_agent, "api_key", None)
if (not parent_api_key) and hasattr(parent_agent, "_client_kwargs"):
parent_api_key = parent_agent._client_kwargs.get("api_key")
# Build progress callback to relay tool calls to parent display
child_progress_cb = _build_child_progress_callback(task_index, parent_agent)
# Build progress callback to relay tool calls to parent display
child_progress_cb = _build_child_progress_callback(task_index, parent_agent, task_count)
# Share the parent's iteration budget so subagent tool calls
# count toward the session-wide limit.
shared_budget = getattr(parent_agent, "iteration_budget", None)
# Share the parent's iteration budget so subagent tool calls
# count toward the session-wide limit.
shared_budget = getattr(parent_agent, "iteration_budget", None)
# Resolve effective credentials: config override > parent inherit
effective_model = model or parent_agent.model
effective_provider = override_provider or getattr(parent_agent, "provider", None)
effective_base_url = override_base_url or parent_agent.base_url
effective_api_key = override_api_key or parent_api_key
effective_api_mode = override_api_mode or getattr(parent_agent, "api_mode", None)
# Resolve effective credentials: config override > parent inherit
effective_model = model or parent_agent.model
effective_provider = override_provider or getattr(parent_agent, "provider", None)
effective_base_url = override_base_url or parent_agent.base_url
effective_api_key = override_api_key or parent_api_key
effective_api_mode = override_api_mode or getattr(parent_agent, "api_mode", None)
child = AIAgent(
base_url=effective_base_url,
api_key=effective_api_key,
model=effective_model,
provider=effective_provider,
api_mode=effective_api_mode,
max_iterations=max_iterations,
max_tokens=getattr(parent_agent, "max_tokens", None),
reasoning_config=getattr(parent_agent, "reasoning_config", None),
prefill_messages=getattr(parent_agent, "prefill_messages", None),
enabled_toolsets=child_toolsets,
quiet_mode=True,
ephemeral_system_prompt=child_prompt,
log_prefix=f"[subagent-{task_index}]",
platform=parent_agent.platform,
skip_context_files=True,
skip_memory=True,
clarify_callback=None,
session_db=getattr(parent_agent, '_session_db', None),
providers_allowed=parent_agent.providers_allowed,
providers_ignored=parent_agent.providers_ignored,
providers_order=parent_agent.providers_order,
provider_sort=parent_agent.provider_sort,
tool_progress_callback=child_progress_cb,
iteration_budget=shared_budget,
)
child = AIAgent(
base_url=effective_base_url,
api_key=effective_api_key,
model=effective_model,
provider=effective_provider,
api_mode=effective_api_mode,
max_iterations=max_iterations,
max_tokens=getattr(parent_agent, "max_tokens", None),
reasoning_config=getattr(parent_agent, "reasoning_config", None),
prefill_messages=getattr(parent_agent, "prefill_messages", None),
enabled_toolsets=child_toolsets,
quiet_mode=True,
ephemeral_system_prompt=child_prompt,
log_prefix=f"[subagent-{task_index}]",
platform=parent_agent.platform,
skip_context_files=True,
skip_memory=True,
clarify_callback=None,
session_db=getattr(parent_agent, '_session_db', None),
providers_allowed=parent_agent.providers_allowed,
providers_ignored=parent_agent.providers_ignored,
providers_order=parent_agent.providers_order,
provider_sort=parent_agent.provider_sort,
tool_progress_callback=child_progress_cb,
iteration_budget=shared_budget,
)
# Set delegation depth so children can't spawn grandchildren
child._delegate_depth = getattr(parent_agent, '_delegate_depth', 0) + 1
# Set delegation depth so children can't spawn grandchildren
child._delegate_depth = getattr(parent_agent, '_delegate_depth', 0) + 1
# Register child for interrupt propagation
if hasattr(parent_agent, '_active_children'):
# Register child for interrupt propagation
if hasattr(parent_agent, '_active_children'):
lock = getattr(parent_agent, '_active_children_lock', None)
if lock:
with lock:
parent_agent._active_children.append(child)
else:
parent_agent._active_children.append(child)
# Run with stdout/stderr suppressed to prevent interleaved output
devnull = io.StringIO()
with contextlib.redirect_stdout(devnull), contextlib.redirect_stderr(devnull):
result = child.run_conversation(user_message=goal)
return child
def _run_single_child(
task_index: int,
goal: str,
child=None,
parent_agent=None,
**_kwargs,
) -> Dict[str, Any]:
"""
Run a pre-built child agent. Called from within a thread.
Returns a structured result dict.
"""
child_start = time.monotonic()
# Get the progress callback from the child agent
child_progress_cb = getattr(child, 'tool_progress_callback', None)
try:
result = child.run_conversation(user_message=goal)
# Flush any remaining batched progress to gateway
if child_progress_cb and hasattr(child_progress_cb, '_flush'):
@@ -355,11 +368,15 @@ def _run_single_child(
# Unregister child from interrupt propagation
if hasattr(parent_agent, '_active_children'):
try:
parent_agent._active_children.remove(child)
lock = getattr(parent_agent, '_active_children_lock', None)
if lock:
with lock:
parent_agent._active_children.remove(child)
else:
parent_agent._active_children.remove(child)
except (ValueError, UnboundLocalError) as e:
logger.debug("Could not remove child from active_children: %s", e)
def delegate_task(
goal: Optional[str] = None,
context: Optional[str] = None,
@@ -428,51 +445,38 @@ def delegate_task(
# Track goal labels for progress display (truncated for readability)
task_labels = [t["goal"][:40] for t in task_list]
if n_tasks == 1:
# Single task -- run directly (no thread pool overhead)
t = task_list[0]
result = _run_single_child(
task_index=0,
goal=t["goal"],
context=t.get("context"),
toolsets=t.get("toolsets") or toolsets,
model=creds["model"],
max_iterations=effective_max_iter,
parent_agent=parent_agent,
task_count=1,
override_provider=creds["provider"],
override_base_url=creds["base_url"],
# Build all child agents on the main thread (thread-safe construction)
children = []
for i, t in enumerate(task_list):
child = _build_child_agent(
task_index=i, goal=t["goal"], context=t.get("context"),
toolsets=t.get("toolsets") or toolsets, model=creds["model"],
max_iterations=effective_max_iter, parent_agent=parent_agent,
override_provider=creds["provider"], override_base_url=creds["base_url"],
override_api_key=creds["api_key"],
override_api_mode=creds["api_mode"],
)
children.append((i, t, child))
if n_tasks == 1:
# Single task -- run directly (no thread pool overhead)
_i, _t, child = children[0]
result = _run_single_child(0, _t["goal"], child, parent_agent)
results.append(result)
else:
# Batch -- run in parallel with per-task progress lines
completed_count = 0
spinner_ref = getattr(parent_agent, '_delegate_spinner', None)
# Save stdout/stderr before the executor — redirect_stdout in child
# threads races on sys.stdout and can leave it as devnull permanently.
_saved_stdout = sys.stdout
_saved_stderr = sys.stderr
with ThreadPoolExecutor(max_workers=MAX_CONCURRENT_CHILDREN) as executor:
futures = {}
for i, t in enumerate(task_list):
for i, t, child in children:
future = executor.submit(
_run_single_child,
task_index=i,
goal=t["goal"],
context=t.get("context"),
toolsets=t.get("toolsets") or toolsets,
model=creds["model"],
max_iterations=effective_max_iter,
child=child,
parent_agent=parent_agent,
task_count=n_tasks,
override_provider=creds["provider"],
override_base_url=creds["base_url"],
override_api_key=creds["api_key"],
override_api_mode=creds["api_mode"],
)
futures[future] = i
@@ -515,10 +519,6 @@ def delegate_task(
except Exception as e:
logger.debug("Spinner update_text failed: %s", e)
# Restore stdout/stderr in case redirect_stdout race left them as devnull
sys.stdout = _saved_stdout
sys.stderr = _saved_stderr
# Sort by task_index so results match input order
results.sort(key=lambda r: r["task_index"])